Dataset schema (the rows below are raw cell dumps, five consecutive cells per example, in the column order listed here):

    column                   type    range
    code                     string  81 to 54k characters
    code_codestyle           int64   0 to 721
    style_context            string  91 to 41.9k characters
    style_context_codestyle  int64   0 to 699
    label                    int64   0 to 1
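As a minimal sketch of how a dataset with this schema can be loaded and inspected with the `datasets` library — the repository identifier `user/code-style-pairs` below is a hypothetical placeholder, not the actual dataset name:

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # NOTE: "user/code-style-pairs" is a hypothetical placeholder identifier;
    # substitute the real dataset repository.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    print(ds.features)                # column names and dtypes (string / int64)

    example = ds[0]
    print(len(example["code"]))       # string cell, 81 to ~54k characters
    print(example["code_codestyle"])  # style class id in [0, 721]
    print(example["label"])           # binary label, 0 or 1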
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available A_ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''BartphoTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
1
"""simple docstring""" from math import ceil, sqrt def UpperCAmelCase__ (snake_case__ : int = 1_00_00_00 ): """simple docstring""" _snake_case : List[Any] = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: _snake_case : Any = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: _snake_case : Dict = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F'''{solution() = }''')
28
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' lowercase__ = ["audio_values", "audio_mask"] def __init__( self: Dict, a_: Tuple=2_048, a_: str=1, a_: Any=[16, 16], a_: str=128, a_: int=44_100, a_: Optional[Any]=86, a_: int=2_048, a_: Optional[Any]=0.0, **a_: Union[str, Any], ): '''simple docstring''' super().__init__( feature_size=a_, sampling_rate=a_, padding_value=a_, **a_, ) _snake_case : Optional[int] = spectrogram_length _snake_case : Dict = num_channels _snake_case : int = patch_size _snake_case : Any = feature_size // self.patch_size[1] _snake_case : List[Any] = n_fft _snake_case : List[str] = sampling_rate // hop_length_to_sampling_rate _snake_case : int = sampling_rate _snake_case : Dict = padding_value _snake_case : Optional[int] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=a_, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=a_, norm="""slaney""", mel_scale="""slaney""", ).T def UpperCamelCase_ ( self: Optional[int], a_: np.array ): '''simple docstring''' _snake_case : List[Any] = spectrogram( a_, window_function(self.n_fft, """hann""" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="""dB""", db_range=80.0, ) _snake_case : Dict = log_spec[:, :-1] _snake_case : Union[str, Any] = log_spec - 20.0 _snake_case : int = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self: Any, a_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_: Optional[Union[str, TensorType]] = None, a_: Optional[bool] = True, a_: Optional[int] = None, a_: bool = False, a_: bool = False, **a_: str, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" f" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) _snake_case : str = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) _snake_case : Optional[Any] = is_batched_numpy or ( isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: _snake_case : List[str] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(a_, np.ndarray ): _snake_case : int = np.asarray(a_, dtype=np.floataa ) elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _snake_case : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _snake_case : List[str] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _snake_case : Dict = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], a_ ): _snake_case : str = [np.asarray(a_, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _snake_case : Any = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _snake_case : Any = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _snake_case : Optional[int] = np.array(a_ ).astype(np.floataa ) # convert into correct format for padding _snake_case : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _snake_case : Optional[Any] = np.ones([len(a_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _snake_case : Any = padded_audio_features * self.padding_value for i in range(len(a_ ) ): _snake_case : Optional[Any] = audio_features[i] _snake_case : Optional[int] = feature # return as BatchFeature if return_attention_mask: _snake_case : Dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: _snake_case : Optional[Any] = {"""audio_values""": padded_audio_features} _snake_case : int = BatchFeature(data=a_, tensor_type=a_ ) return encoded_inputs
28
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class lowercase( yaml.SafeLoader ): '''simple docstring''' def UpperCamelCase_ ( self: Union[str, Any], a_: Any ): '''simple docstring''' _snake_case : str = [self.constructed_objects[key_node] for key_node, _ in node.value] _snake_case : Any = [tuple(a_ ) if isinstance(a_, a_ ) else key for key in keys] _snake_case : Optional[int] = Counter(a_ ) _snake_case : Dict = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" ) def UpperCamelCase_ ( self: Any, a_: List[Any], a_: List[str]=False ): '''simple docstring''' _snake_case : Any = super().construct_mapping(a_, deep=a_ ) self._check_no_duplicates_on_constructed_node(a_ ) return mapping def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: _snake_case : List[Any] = full_content[1:].index("""---""" ) + 1 _snake_case : Dict = """\n""".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(snake_case__ ) class lowercase( __a ): '''simple docstring''' lowercase__ = {"train_eval_index"} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase_ ( cls: List[Any], a_: Path ): '''simple docstring''' with open(a_, encoding="""utf-8""" ) as readme_file: _snake_case , _snake_case : List[Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(a_ ) else: return cls() def UpperCamelCase_ ( self: Tuple, a_: Path ): '''simple docstring''' if path.exists(): with open(a_, encoding="""utf-8""" ) as readme_file: _snake_case : List[str] = readme_file.read() else: _snake_case : Dict = None _snake_case : List[str] = self._to_readme(a_ ) with open(a_, """w""", encoding="""utf-8""" ) as readme_file: readme_file.write(a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[str] = None ): '''simple docstring''' if readme_content is not None: _snake_case , _snake_case : Union[str, Any] = _split_yaml_from_readme(a_ ) _snake_case : str = """---\n""" + self.to_yaml_string() + """---\n""" + content else: _snake_case : Any = """---\n""" + self.to_yaml_string() + """---\n""" return full_content @classmethod def UpperCamelCase_ ( cls: Dict, a_: str ): '''simple docstring''' _snake_case : int = yaml.load(a_, Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields _snake_case : Tuple = { (key.replace("""-""", """_""" ) if key.replace("""-""", """_""" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return yaml.safe_dump( { (key.replace("""_""", """-""" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() }, sort_keys=a_, allow_unicode=a_, encoding="""utf-8""", ).decode("""utf-8""" ) A_ = { '''image-classification''': [], '''translation''': [], '''image-segmentation''': [], '''fill-mask''': [], '''automatic-speech-recognition''': [], '''token-classification''': [], '''sentence-similarity''': [], '''audio-classification''': [], '''question-answering''': [], '''summarization''': [], '''zero-shot-classification''': [], '''table-to-text''': [], '''feature-extraction''': [], '''other''': [], '''multiple-choice''': [], 
'''text-classification''': [], '''text-to-image''': [], '''text2text-generation''': [], '''zero-shot-image-classification''': [], '''tabular-classification''': [], '''tabular-regression''': [], '''image-to-image''': [], '''tabular-to-text''': [], '''unconditional-image-generation''': [], '''text-retrieval''': [], '''text-to-speech''': [], '''object-detection''': [], '''audio-to-audio''': [], '''text-generation''': [], '''conversational''': [], '''table-question-answering''': [], '''visual-question-answering''': [], '''image-to-text''': [], '''reinforcement-learning''': [], '''voice-activity-detection''': [], '''time-series-forecasting''': [], '''document-question-answering''': [], } if __name__ == "__main__": from argparse import ArgumentParser A_ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''') ap.add_argument('''readme_filepath''') A_ = ap.parse_args() A_ = Path(args.readme_filepath) A_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
28
1
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = VideoToVideoSDPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"} lowercase__ = False # No `output_type`. lowercase__ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def UpperCamelCase_ ( self: str ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : int = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""), up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""), cross_attention_dim=32, attention_head_dim=4, ) _snake_case : Optional[Any] = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=a_, set_alpha_to_one=a_, ) torch.manual_seed(0 ) _snake_case : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=512, ) _snake_case : Optional[Any] = CLIPTextModel(a_ ) _snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _snake_case : Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: List[Any]=0 ): '''simple docstring''' _snake_case : List[str] = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith("""mps""" ): _snake_case : Dict = torch.manual_seed(a_ ) else: _snake_case : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : int = { """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator _snake_case : List[Any] = self.get_dummy_components() _snake_case : int = VideoToVideoSDPipeline(**a_ ) _snake_case : 
int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _snake_case : Optional[Any] = self.get_dummy_inputs(a_ ) _snake_case : Tuple = """np""" _snake_case : Tuple = sd_pipe(**a_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) _snake_case : int = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_, expected_max_diff=5E-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""", torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _snake_case : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) _snake_case : Any = torch.randn((1, 10, 3, 1_024, 576), generator=a_ ) _snake_case : Union[str, Any] = video.to("""cuda""" ) _snake_case : List[Any] = """Spiderman is surfing""" _snake_case : Any = pipe(a_, video=a_, generator=a_, num_inference_steps=3, output_type="""pt""" ).frames _snake_case : int = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
28
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : List[str] = [1] _snake_case , _snake_case , _snake_case : Any = 0, 0, 0 _snake_case : int = ugly_nums[ia] * 2 _snake_case : List[str] = ugly_nums[ia] * 3 _snake_case : int = ugly_nums[ia] * 5 for _ in range(1 , snake_case__ ): _snake_case : Optional[int] = min(snake_case__ , snake_case__ , snake_case__ ) ugly_nums.append(snake_case__ ) if next_num == next_a: ia += 1 _snake_case : Any = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 _snake_case : Union[str, Any] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 _snake_case : Union[str, Any] = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F'''{ugly_numbers(2_00) = }''')
28
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version A_ = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] ): """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" F" reinstalling {pkg}." ) if not ops[op](version.parse(snake_case__ ) , version.parse(snake_case__ ) ): raise ImportError( F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" _snake_case : Any = F"\n{hint}" if hint is not None else """""" # non-versioned check if re.match(R"""^[\w_\-\d]+$""" , snake_case__ ): _snake_case , _snake_case , _snake_case : Dict = requirement, None, None else: _snake_case : Tuple = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , snake_case__ ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but""" F" got {requirement}" ) _snake_case , _snake_case : Dict = match[0] _snake_case : Tuple = want_full.split(""",""" ) # there could be multiple requirements _snake_case : Dict = {} for w in want_range: _snake_case : List[Any] = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , snake_case__ ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,""" F" but got {requirement}" ) _snake_case , _snake_case : Optional[Any] = match[0] _snake_case : Union[str, Any] = want_ver if op not in ops: raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" ) # special case if pkg == "python": _snake_case : List[str] = """.""".join([str(snake_case__ ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return # check if any version is installed try: _snake_case : List[Any] = importlib.metadata.version(snake_case__ ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F"The '{requirement}' distribution was not found and is required by this application. {hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main""" return require_version(snake_case__ , snake_case__ )
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
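The attention-splitting branch above hinges on one tensor manipulation: the fused `qkv` parameter is grouped per head and split into three equal chunks. A standalone sketch of that step on a fused bias vector, with illustrative sizes (4 heads, 64 channels):

import torch

num_heads, channels = 4, 64
qkv_bias = torch.arange(3 * channels, dtype=torch.float32)  # fused bias, shape (192,)

# Group per head so each head's q, k and v sub-blocks sit next to each other ...
per_head = qkv_bias.reshape(num_heads, 3 * channels // num_heads)  # (4, 48)
q, k, v = per_head.split(channels // num_heads, dim=1)             # three (4, 16) chunks

# ... then flatten the heads back into plain (channels,) vectors.
q, k, v = q.reshape(-1), k.reshape(-1), v.reshape(-1)
assert q.shape == k.shape == v.shape == (channels,)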
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) _snake_case : List[Any] = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(snake_case__ ) DownloadCommand.register_subcommand(snake_case__ ) EnvironmentCommand.register_subcommand(snake_case__ ) RunCommand.register_subcommand(snake_case__ ) ServeCommand.register_subcommand(snake_case__ ) UserCommands.register_subcommand(snake_case__ ) AddNewModelCommand.register_subcommand(snake_case__ ) AddNewModelLikeCommand.register_subcommand(snake_case__ ) LfsCommands.register_subcommand(snake_case__ ) PTtoTFCommand.register_subcommand(snake_case__ ) # Let's go _snake_case : Union[str, Any] = parser.parse_args() if not hasattr(snake_case__ , """func""" ): parser.print_help() exit(1 ) # Run _snake_case : Optional[Any] = args.func(snake_case__ ) service.run() if __name__ == "__main__": main()
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: Any ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(a_ ): _snake_case : Optional[int] = AutoConfig.from_pretrained(a_ ) self.assertIsNotNone(a_ ) self.assertIsInstance(a_, a_ ) _snake_case : Dict = FlaxAutoModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) self.assertIsInstance(a_, a_ ) @slow def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(a_ ): _snake_case : Any = AutoConfig.from_pretrained(a_ ) self.assertIsNotNone(a_ ) self.assertIsInstance(a_, a_ ) _snake_case : Optional[int] = FlaxAutoModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) self.assertIsInstance(a_, a_ ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(a_ ) _snake_case : Optional[int] = FlaxBertModel.from_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer("""Do you support jax jitted function?""", return_tensors=TensorType.JAX ) @jax.jit def eval(**a_: str ): return model(**a_ ) eval(**a_ ).block_until_ready() @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(a_ ) _snake_case : List[Any] = FlaxRobertaModel.from_pretrained(a_ ) _snake_case : Tuple = tokenizer("""Do you support jax jitted function?""", return_tensors=TensorType.JAX ) @jax.jit def eval(**a_: List[Any] ): return model(**a_ ) eval(**a_ ).block_until_ready() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' with self.assertRaisesRegex( a_, """bert-base is not a local folder and is not a valid model identifier""" ): _snake_case : Union[str, Any] = FlaxAutoModel.from_pretrained("""bert-base""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' with self.assertRaisesRegex( a_, r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _snake_case : List[Any] = FlaxAutoModel.from_pretrained(a_, revision="""aaaaaa""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' with self.assertRaisesRegex( a_, """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""", ): _snake_case : str = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' with self.assertRaisesRegex(a_, """Use `from_pt=True` to load this model""" ): _snake_case : List[str] = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
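A minimal sketch of composing the two sub-configurations, assuming the classes carry their public Transformers names (`BridgeTowerConfig`, `BridgeTowerTextConfig`, `BridgeTowerVisionConfig`) and that the classmethod above is exposed as `from_text_vision_configs`:

from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig(vocab_size=50_265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, image_size=288)

# Build the composite config from the two sub-configs.
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.vocab_size, config.vision_config.image_size)  # 50265 288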
"""simple docstring""" import logging import os from .state import PartialState class lowercase( logging.LoggerAdapter ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( a_: List[str] ): '''simple docstring''' _snake_case : Tuple = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def UpperCamelCase_ ( self: Optional[Any], a_: Dict, a_: str, *a_: Optional[Any], **a_: Union[str, Any] ): '''simple docstring''' if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) _snake_case : Optional[Any] = kwargs.pop("""main_process_only""", a_ ) _snake_case : Any = kwargs.pop("""in_order""", a_ ) if self.isEnabledFor(a_ ): if self._should_log(a_ ): _snake_case , _snake_case : Tuple = self.process(a_, a_ ) self.logger.log(a_, a_, *a_, **a_ ) elif in_order: _snake_case : int = PartialState() for i in range(state.num_processes ): if i == state.process_index: _snake_case , _snake_case : List[str] = self.process(a_, a_ ) self.logger.log(a_, a_, *a_, **a_ ) state.wait_for_everyone() def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str = None ): """simple docstring""" if log_level is None: _snake_case : str = os.environ.get("""ACCELERATE_LOG_LEVEL""" , snake_case__ ) _snake_case : int = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__ , {} )
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
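The bias handling in `read_in_q_v_bias` above is worth isolating: the original checkpoint stores only q and v biases, and the fused qkv bias is rebuilt with zeros in the k slot (the ViT attention has no k bias). A standalone sketch with an illustrative width:

import torch

hidden = 8  # illustrative width
q_bias = torch.randn(hidden)
v_bias = torch.randn(hidden)

# Concatenate q, a zero block for k, and v into one fused bias.
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden,)
assert torch.equal(qkv_bias[hidden : 2 * hidden], torch.zeros(hidden))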
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
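The config loader at the top of this script flattens nested YAML into dotted keys before copying them onto a `Namespace`; here is the flattening step in isolation (the same recursion as `flatten_yaml_as_dict`, deobfuscated):

import collections.abc


def flatten_as_dict(d, parent_key="", sep="."):
    # Nested mappings become dotted keys; leaves are kept as-is.
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_as_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


print(flatten_as_dict({"model": {"classification": {"name": "mobilevit_v2"}}}))
# -> {'model.classification.name': 'mobilevit_v2'}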
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
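# A minimal, self-contained sketch (an assumption, not diffusers' exact code)
# of the DummyObject pattern the generated files above rely on: any attribute
# access on a dummy class fails loudly, naming the missing backend.
class DummyObject(type):
    def __getattr__(cls, key):
        # _backends is found normally on the class, so no recursion occurs here
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

# FakeClass.from_pretrained  -> ImportError: FakeClass requires the backends ['torch']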
28
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
28
1
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "segformer" def __init__( self: Union[str, Any], a_: List[Any]=3, a_: int=4, a_: int=[2, 2, 2, 2], a_: Union[str, Any]=[8, 4, 2, 1], a_: Optional[Any]=[32, 64, 160, 256], a_: Optional[Any]=[7, 3, 3, 3], a_: Optional[int]=[4, 2, 2, 2], a_: str=[1, 2, 5, 8], a_: List[str]=[4, 4, 4, 4], a_: Tuple="gelu", a_: List[str]=0.0, a_: Dict=0.0, a_: Dict=0.1, a_: Tuple=0.02, a_: List[str]=0.1, a_: List[Any]=1E-6, a_: Union[str, Any]=256, a_: Optional[int]=255, **a_: Tuple, ): '''simple docstring''' super().__init__(**a_ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""", a_, ) _snake_case : Union[str, Any] = num_channels _snake_case : Tuple = num_encoder_blocks _snake_case : Dict = depths _snake_case : Dict = sr_ratios _snake_case : Tuple = hidden_sizes _snake_case : Dict = patch_sizes _snake_case : List[str] = strides _snake_case : Optional[int] = mlp_ratios _snake_case : List[Any] = num_attention_heads _snake_case : List[Any] = hidden_act _snake_case : Dict = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : Union[str, Any] = classifier_dropout_prob _snake_case : str = initializer_range _snake_case : Optional[int] = drop_path_rate _snake_case : Tuple = layer_norm_eps _snake_case : Tuple = decoder_hidden_size _snake_case : Optional[Any] = kwargs.get("""reshape_last_stage""", a_ ) _snake_case : Any = semantic_loss_ignore_index class lowercase( __a ): '''simple docstring''' lowercase__ = version.parse("1.11" ) @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return 1E-4 @property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return 12
28
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class lowercase( __a ): '''simple docstring''' lowercase__ = 42 class lowercase( __a , __a ): '''simple docstring''' @register_to_config def __init__( self: List[Any], a_: int = 16, a_: int = 88, a_: Optional[int] = None, a_: Optional[int] = None, a_: int = 1, a_: float = 0.0, a_: int = 32, a_: Optional[int] = None, a_: bool = False, a_: Optional[int] = None, a_: str = "geglu", a_: bool = True, a_: bool = True, ): '''simple docstring''' super().__init__() _snake_case : List[str] = num_attention_heads _snake_case : int = attention_head_dim _snake_case : Optional[int] = num_attention_heads * attention_head_dim _snake_case : Any = in_channels _snake_case : List[str] = torch.nn.GroupNorm(num_groups=a_, num_channels=a_, eps=1E-6, affine=a_ ) _snake_case : Dict = nn.Linear(a_, a_ ) # 3. Define transformers blocks _snake_case : List[Any] = nn.ModuleList( [ BasicTransformerBlock( a_, a_, a_, dropout=a_, cross_attention_dim=a_, activation_fn=a_, attention_bias=a_, double_self_attention=a_, norm_elementwise_affine=a_, ) for d in range(a_ ) ] ) _snake_case : Tuple = nn.Linear(a_, a_ ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: List[str]=None, a_: Union[str, Any]=None, a_: Tuple=None, a_: Any=1, a_: int=None, a_: bool = True, ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : Any = hidden_states.shape _snake_case : List[str] = batch_frames // num_frames _snake_case : Tuple = hidden_states _snake_case : List[str] = hidden_states[None, :].reshape(a_, a_, a_, a_, a_ ) _snake_case : Dict = hidden_states.permute(0, 2, 1, 3, 4 ) _snake_case : Tuple = self.norm(a_ ) _snake_case : Optional[int] = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, a_, a_ ) _snake_case : Any = self.proj_in(a_ ) # 2. Blocks for block in self.transformer_blocks: _snake_case : List[str] = block( a_, encoder_hidden_states=a_, timestep=a_, cross_attention_kwargs=a_, class_labels=a_, ) # 3. Output _snake_case : Tuple = self.proj_out(a_ ) _snake_case : List[str] = ( hidden_states[None, None, :] .reshape(a_, a_, a_, a_, a_ ) .permute(0, 3, 4, 1, 2 ) .contiguous() ) _snake_case : List[Any] = hidden_states.reshape(a_, a_, a_, a_ ) _snake_case : List[str] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=a_ )
28
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
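# Standalone sketch of the timing convention used by _measure_speed above:
# repeat the callable, take the minimum of the repeats (per the timeit docs),
# and normalize by the inner loop count to get seconds per call.
import timeit

def measure_speed(func, repeat=3, number=10):
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number

# measure_speed(lambda: sum(range(1_000)))  -> seconds per call, e.g. ~1e-5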
28
1
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase: '''simple docstring''' def __init__( self: Tuple, a_: Any, a_: Optional[Any]=13, a_: List[str]=7, a_: str=True, a_: Union[str, Any]=True, a_: Optional[Any]=True, a_: int=True, a_: str=99, a_: List[Any]=32, a_: Optional[Any]=5, a_: int=4, a_: Optional[int]=37, a_: Dict="gelu", a_: List[Any]=0.1, a_: Dict=0.1, a_: List[str]=128, a_: str=32, a_: Optional[int]=16, a_: Optional[Any]=2, a_: int=0.02, a_: Tuple=3, a_: Any=4, a_: Dict=None, ): '''simple docstring''' _snake_case : str = parent _snake_case : Dict = batch_size _snake_case : List[Any] = seq_length _snake_case : List[Any] = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : Optional[int] = use_token_type_ids _snake_case : int = use_labels _snake_case : Any = vocab_size _snake_case : Dict = hidden_size _snake_case : Union[str, Any] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : List[Any] = intermediate_size _snake_case : Union[str, Any] = hidden_act _snake_case : List[str] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Tuple = max_position_embeddings _snake_case : Union[str, Any] = type_vocab_size _snake_case : Optional[int] = type_sequence_label_size _snake_case : Optional[Any] = initializer_range _snake_case : List[str] = num_labels _snake_case : List[Any] = num_choices _snake_case : Optional[int] = scope def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : List[str] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) _snake_case : Optional[int] = None _snake_case : int = None _snake_case : Dict = None if self.use_labels: _snake_case : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) _snake_case : Tuple = ids_tensor([self.batch_size], self.num_choices ) _snake_case : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=a_, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self: int ): '''simple docstring''' ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Union[str, Any] = self.prepare_config_and_inputs() _snake_case : int = True _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _snake_case : int = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: Optional[int], a_: int, a_: Dict, a_: int, a_: Any, a_: str ): '''simple docstring''' _snake_case : Dict = NezhaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_, attention_mask=a_, token_type_ids=a_ ) _snake_case : Union[str, Any] = model(a_, token_type_ids=a_ ) _snake_case : List[str] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: List[str], a_: str, a_: Any, a_: Any, a_: Tuple, a_: Any, a_: Tuple, a_: Any, ): '''simple docstring''' _snake_case : str = True _snake_case : Dict = NezhaModel(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[Any] = model( a_, attention_mask=a_, token_type_ids=a_, encoder_hidden_states=a_, encoder_attention_mask=a_, ) _snake_case : Dict = model( a_, attention_mask=a_, token_type_ids=a_, encoder_hidden_states=a_, ) _snake_case : Tuple = model(a_, attention_mask=a_, token_type_ids=a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, Any], a_: int, a_: Optional[Any], a_: str, a_: Dict, a_: Optional[int], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = NezhaForMaskedLM(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self: Tuple, a_: int, a_: Optional[Any], a_: Any, a_: int, a_: Union[str, Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = NezhaForNextSentencePrediction(config=a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model( a_, attention_mask=a_, token_type_ids=a_, labels=a_, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Union[str, Any], a_: List[str], a_: Optional[Any], a_: int, a_: int, a_: Dict ): '''simple docstring''' _snake_case : Tuple = NezhaForPreTraining(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model( a_, attention_mask=a_, token_type_ids=a_, labels=a_, next_sentence_label=a_, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: int, a_: List[Any], a_: List[str], a_: str, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : List[Any] = NezhaForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model( a_, attention_mask=a_, token_type_ids=a_, start_positions=a_, end_positions=a_, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self: int, a_: Union[str, Any], a_: str, a_: Optional[Any], a_: str, a_: List[str], a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : Any = self.num_labels _snake_case : Union[str, Any] = NezhaForSequenceClassification(a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Dict, a_: Any, a_: Dict, a_: Tuple, a_: List[Any], a_: int, a_: str, a_: Dict ): '''simple docstring''' _snake_case : int = self.num_labels _snake_case : List[Any] = NezhaForTokenClassification(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self: Any, a_: Dict, a_: Union[str, Any], a_: str, a_: int, a_: int, a_: Tuple, a_: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = self.num_choices _snake_case : Optional[Any] = NezhaForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : int = model( a_, attention_mask=a_, token_type_ids=a_, labels=a_, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase( __a , __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True def UpperCamelCase_ ( self: Union[str, Any], a_: int, a_: List[Any], a_: List[str]=False ): '''simple docstring''' _snake_case : Any = super()._prepare_for_class(a_, a_, return_labels=a_ ) if 
return_labels: if model_class in get_values(a_ ): _snake_case : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=a_ ) _snake_case : str = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=a_ ) return inputs_dict def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : int = NezhaModelTester(self ) _snake_case : List[Any] = ConfigTester(self, config_class=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() _snake_case : List[Any] = None self.model_tester.create_and_check_model_as_decoder( a_, a_, a_, a_, a_, a_, a_, a_, a_, ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) @slow def UpperCamelCase_ ( self: Any ): '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = NezhaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return _snake_case : str = True _snake_case : Tuple = model_class(config=a_ ) _snake_case : Optional[int] = self._prepare_for_class(a_, a_ ) _snake_case : Optional[Any] = torch.jit.trace( a_, (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_, os.path.join(a_, """bert.pt""" ) ) _snake_case : List[str] = torch.jit.load(os.path.join(a_, """bert.pt""" ), map_location=a_ ) loaded(inputs_dict["""input_ids"""].to(a_ ), inputs_dict["""attention_mask"""].to(a_ ) ) @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : int = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) _snake_case : int = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _snake_case : List[Any] = model(a_, attention_mask=a_ )[0] _snake_case : str = torch.Size((1, 6, 768) ) self.assertEqual(output.shape, a_ ) _snake_case : List[Any] = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) _snake_case : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case : int = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _snake_case : Any = model(a_, attention_mask=a_ )[0] _snake_case : Optional[int] = torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape, a_ ) _snake_case : int = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], a_, atol=1E-4 ) )
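# Generic sketch of the TorchScript trace/save/load round-trip the test above
# performs (the tiny module here is illustrative, not a Nezha model):
import os
import tempfile
import torch

traced = torch.jit.trace(torch.nn.Linear(4, 2), torch.randn(1, 4))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)
out = loaded(torch.randn(1, 4))  # runs the reloaded traced module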
28
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
28
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A_ = logging.get_logger(__name__) A_ = '''▁''' A_ = {'''vocab_file''': '''sentencepiece.bpe.model'''} A_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), } } A_ = { '''facebook/mbart-large-en-ro''': 10_24, '''facebook/mbart-large-cc25''': 10_24, } # fmt: off A_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = ["input_ids", "attention_mask"] lowercase__ = [] lowercase__ = [] def __init__( self: Optional[int], a_: List[Any], a_: int="<s>", a_: str="</s>", a_: Tuple="</s>", a_: Union[str, Any]="<s>", a_: Union[str, Any]="<unk>", a_: Optional[Any]="<pad>", a_: List[Any]="<mask>", a_: Optional[int]=None, a_: str=None, a_: Optional[Any]=None, a_: Optional[Dict[str, Any]] = None, a_: Union[str, Any]=None, **a_: Optional[int], ): '''simple docstring''' _snake_case : List[Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token _snake_case : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, cls_token=a_, pad_token=a_, mask_token=a_, tokenizer_file=a_, src_lang=a_, tgt_lang=a_, additional_special_tokens=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, ) _snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a_ ) ) _snake_case : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _snake_case : List[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _snake_case : List[Any] = 1 _snake_case : List[str] = len(self.sp_model ) _snake_case : Tuple = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a_ ) } _snake_case : Any = {v: k for k, v in self.lang_code_to_id.items()} _snake_case : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _snake_case : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _snake_case : Optional[int] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _snake_case : Union[str, Any] = src_lang if src_lang is not None else """en_XX""" _snake_case : Dict = self.lang_code_to_id[self._src_lang] _snake_case : Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = self.__dict__.copy() _snake_case : str = None _snake_case : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[Any], a_: Optional[int] ): '''simple docstring''' _snake_case : int = d # for backward compatibility if not hasattr(self, """sp_model_kwargs""" ): _snake_case : Union[str, Any] = {} _snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def UpperCamelCase_ ( self: Any ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCamelCase_ ( self: List[Any], a_: str ): '''simple docstring''' _snake_case : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self: Any, a_: List[int], a_: Optional[List[int]] = None, a_: bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ ) _snake_case : str = [1] * len(self.prefix_tokens ) _snake_case : List[str] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(a_ )) + suffix_ones return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones def UpperCamelCase_ ( self: Dict, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : str = [self.sep_token_id] _snake_case : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: str, a_: Optional[str], a_: Optional[str], **a_: Any ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _snake_case : List[Any] = src_lang _snake_case : Dict = self(a_, add_special_tokens=a_, return_tensors=a_, **a_ ) _snake_case : Dict = self.convert_tokens_to_ids(a_ ) _snake_case : Optional[Any] = tgt_lang_id return inputs def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self: List[Any], a_: str ): '''simple docstring''' return self.sp_model.encode(a_, out_type=a_ ) def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return 
self.fairseq_tokens_to_ids[token] _snake_case : Any = self.sp_model.PieceToId(a_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self: List[str], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = """""".join(a_ ).replace(a_, """ """ ).strip() return out_string def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : List[str] = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_, """wb""" ) as fi: _snake_case : str = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,) def UpperCamelCase_ ( self: Optional[Any], a_: List[str], a_: str = "en_XX", a_: Optional[List[str]] = None, a_: str = "ro_RO", **a_: Dict, ): '''simple docstring''' _snake_case : Optional[Any] = src_lang _snake_case : int = tgt_lang return super().prepare_seqaseq_batch(a_, a_, **a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self: Optional[Any], a_: Any ): '''simple docstring''' _snake_case : List[Any] = self.lang_code_to_id[src_lang] _snake_case : Dict = [] _snake_case : List[str] = [self.eos_token_id, self.cur_lang_code] def UpperCamelCase_ ( self: Tuple, a_: str ): '''simple docstring''' _snake_case : Tuple = self.lang_code_to_id[lang] _snake_case : str = [] _snake_case : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
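# Hedged usage sketch mirroring the documented MBart workflow (the checkpoint
# name is a real Hugging Face repo; the example sentence is illustrative):
from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# input_ids end with </s> followed by the en_XX language code, matching
# set_src_lang_special_tokens above.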
28
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
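A minimal, hedged inference sketch mirroring the segmentation integration test above; it only relies on the transformers APIs the test itself exercises, and the input file name is a hypothetical placeholder.

import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

image = Image.open("scene.png")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# rescale the low-resolution class logits back to the input size and take the per-pixel argmax
segmentation_map = processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)[0]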
28
1
"""simple docstring""" import numpy as np import datasets A_ = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' A_ = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' A_ = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { """X""": datasets.Sequence(datasets.Value("""float""", id="""sequence""" ), id="""X""" ), } ), ) def UpperCamelCase_ ( self: Dict, a_: Optional[int], a_: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = np.array(a_ ) _snake_case : Union[str, Any] = np.array(a_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("""Expected `X` to be a 2D vector""" ) if len(reference_distribution.shape ) != 2: raise ValueError("""Expected `reference_distribution` to be a 2D vector""" ) if reference_distribution.shape[0] < 2: raise ValueError( """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" ) # Get mahalanobis distance for each prediction _snake_case : Optional[int] = X - np.mean(a_ ) _snake_case : Any = np.cov(reference_distribution.T ) try: _snake_case : Tuple = np.linalg.inv(a_ ) except np.linalg.LinAlgError: _snake_case : Dict = np.linalg.pinv(a_ ) _snake_case : Tuple = np.dot(a_, a_ ) _snake_case : Tuple = np.dot(a_, X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
28
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" assert x is not None assert y is not None _snake_case : List[str] = len(snake_case__ ) _snake_case : Any = len(snake_case__ ) # declaring the array for storing the dp values _snake_case : int = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): _snake_case : int = 1 if x[i - 1] == y[j - 1] else 0 _snake_case : List[Any] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) _snake_case : str = """""" _snake_case , _snake_case : Any = m, n while i > 0 and j > 0: _snake_case : List[Any] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: _snake_case : List[str] = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": A_ = '''AGGTAB''' A_ = '''GXTXAYB''' A_ = 4 A_ = '''GTAB''' A_ , A_ = longest_common_subsequence(a, b) print('''len =''', ln, ''', sub-sequence =''', subseq) import doctest doctest.testmod()
28
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
28
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
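For reference, the prompt parser above accepts either a '|'-separated string with optional ':weight' suffixes or a list of (text, weight) pairs; a hedged illustration of both accepted forms (the texts and weights are arbitrary):

pos_prompts = "a smiling face:1.0|studio lighting:0.5"    # string form, split on '|' and ':'
neg_prompts = [("blurry", 1.0), ("low resolution", 0.5)]  # list-of-pairs form
# either form is normalized to {"prompts": [...], "weights": torch.tensor([...])}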
28
1
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() A_ = logging.get_logger('''transformers.models.speecht5''') A_ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } A_ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } A_ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } A_ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } A_ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } A_ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } A_ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } A_ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } A_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } A_ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } A_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } A_ = [] A_ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] A_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] A_ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] A_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" for attribute in key.split(""".""" ): _snake_case : List[Any] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: _snake_case : List[str] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _snake_case : Optional[Any] = value elif weight_type == "weight_g": _snake_case : List[str] = value elif weight_type == "weight_v": _snake_case : int = value elif weight_type == "bias": _snake_case : Tuple = value elif weight_type == "running_mean": _snake_case : Optional[Any] = value elif weight_type == "running_var": _snake_case : Optional[Any] = value elif weight_type == "num_batches_tracked": _snake_case : Optional[Any] = value else: _snake_case : int = value logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : int ): """simple docstring""" for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _snake_case , _snake_case : int = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : Any ): """simple docstring""" _snake_case : Tuple = [] if task == "s2t": _snake_case : int = hf_model.speechta.encoder.prenet.feature_encoder _snake_case : Tuple = MAPPING_S2T _snake_case : Union[str, Any] = IGNORE_KEYS_S2T elif task == "t2s": _snake_case : int = None _snake_case : Union[str, Any] = MAPPING_T2S _snake_case : List[str] = IGNORE_KEYS_T2S elif task == "s2s": _snake_case : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder _snake_case : Tuple = MAPPING_S2S _snake_case : Tuple = IGNORE_KEYS_S2S else: raise ValueError(F"Unsupported task: {task}" ) for name, value in fairseq_dict.items(): if should_ignore(snake_case__ , snake_case__ ): logger.info(F"{name} was ignored" ) continue _snake_case : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _snake_case : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _snake_case , _snake_case : Union[str, Any] = key.split(""".*.""" ) if prefix in name and suffix in name: _snake_case : Optional[int] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _snake_case : Union[str, Any] = True if "*" in mapped_key: _snake_case : int = name.split(snake_case__ )[0].split(""".""" )[-2] _snake_case : str = mapped_key.replace("""*""" , snake_case__ ) if "weight_g" in name: _snake_case : str = """weight_g""" elif "weight_v" in name: _snake_case : Dict = """weight_v""" elif "bias" in name: _snake_case : List[str] = """bias""" elif "weight" in name: _snake_case : Optional[int] = """weight""" elif "running_mean" in name: _snake_case : int = """running_mean""" elif "running_var" in name: _snake_case : int = """running_var""" elif "num_batches_tracked" in name: _snake_case : Union[str, Any] = """num_batches_tracked""" else: _snake_case : Dict = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F"Unused weights: {unused_weights}" ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : Dict = full_name.split("""conv_layers.""" )[-1] _snake_case : Dict = name.split(""".""" ) _snake_case : Union[str, Any] = int(items[0] ) _snake_case : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _snake_case : Optional[int] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _snake_case : str = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) _snake_case : List[str] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) _snake_case : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) @torch.no_grad() def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : List[Any]=None , snake_case__ : Tuple=None , snake_case__ : Dict=None , ): """simple docstring""" if config_path is not None: _snake_case : List[Any] = SpeechTaConfig.from_pretrained(snake_case__ ) else: _snake_case : List[Any] = SpeechTaConfig() if task == "s2t": _snake_case : Union[str, Any] = config.max_text_positions _snake_case : Optional[Any] = SpeechTaForSpeechToText(snake_case__ ) elif task == "t2s": _snake_case : List[Any] = 18_76 _snake_case : Tuple = 6_00 _snake_case : Union[str, Any] = config.max_speech_positions _snake_case : Dict = SpeechTaForTextToSpeech(snake_case__ ) elif task == "s2s": _snake_case : List[str] = 18_76 _snake_case : List[str] = config.max_speech_positions _snake_case : Dict = SpeechTaForSpeechToSpeech(snake_case__ ) else: raise ValueError(F"Unknown task name: {task}" ) if vocab_path: _snake_case : str = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _snake_case : Tuple = AddedToken("""<mask>""" , lstrip=snake_case__ , rstrip=snake_case__ ) _snake_case : Tuple = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) _snake_case : List[Any] = SpeechTaFeatureExtractor() _snake_case : Optional[Any] = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ ) processor.save_pretrained(snake_case__ ) _snake_case : Optional[Any] = torch.load(snake_case__ ) recursively_load_weights(fairseq_checkpoint["""model"""] , snake_case__ , snake_case__ ) model.save_pretrained(snake_case__ ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(snake_case__ ) model.push_to_hub(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A_ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
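A hedged example invocation of the converter above (the script name and all paths are hypothetical placeholders; only the flags come from the argparse definition):

# python convert_speecht5_checkpoint.py \
#     --task t2s \
#     --checkpoint_path ./speecht5_tts.pt \
#     --vocab_path ./spm_char.model \
#     --pytorch_dump_folder_path ./speecht5_tts_converted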
28
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = { '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''], '''tokenization_convbert''': ['''ConvBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''ConvBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvBertForMaskedLM''', '''ConvBertForMultipleChoice''', '''ConvBertForQuestionAnswering''', '''ConvBertForSequenceClassification''', '''ConvBertForTokenClassification''', '''ConvBertLayer''', '''ConvBertModel''', '''ConvBertPreTrainedModel''', '''load_tf_weights_in_convbert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFConvBertForMaskedLM''', '''TFConvBertForMultipleChoice''', '''TFConvBertForQuestionAnswering''', '''TFConvBertForSequenceClassification''', '''TFConvBertForTokenClassification''', '''TFConvBertLayer''', '''TFConvBertModel''', '''TFConvBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
"""simple docstring""" from math import isqrt def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) ) def UpperCAmelCase__ (snake_case__ : int = 10**6 ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : List[Any] = 1 _snake_case : str = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
28
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowercase( __a ): '''simple docstring''' def __init__( self: Dict, a_: Callable, a_: Optional[Features] = None, a_: str = None, a_: bool = False, a_: bool = False, a_: Optional[dict] = None, a_: Optional[int] = None, **a_: int, ): '''simple docstring''' super().__init__( features=a_, cache_dir=a_, keep_in_memory=a_, streaming=a_, num_proc=a_, **a_, ) _snake_case : List[str] = Generator( cache_dir=a_, features=a_, generator=a_, gen_kwargs=a_, **a_, ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if self.streaming: _snake_case : Optional[int] = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: _snake_case : List[Any] = None _snake_case : List[str] = None _snake_case : int = None _snake_case : Dict = None self.builder.download_and_prepare( download_config=a_, download_mode=a_, verification_mode=a_, base_path=a_, num_proc=self.num_proc, ) _snake_case : Optional[int] = self.builder.as_dataset( split="""train""", verification_mode=a_, in_memory=self.keep_in_memory ) return dataset
28
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Union[tf.Tensor, np.ndarray] ): """simple docstring""" if isinstance(snake_case__ , np.ndarray ): return list(tensor.shape ) _snake_case : List[Any] = tf.shape(snake_case__ ) if tensor.shape == tf.TensorShape(snake_case__ ): return dynamic _snake_case : int = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case__ )] def UpperCAmelCase__ (snake_case__ : tf.Tensor , snake_case__ : Optional[int] = None , snake_case__ : Optional[str] = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9 , axis=snake_case__ , name=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Dict=1e-5 , snake_case__ : int=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case__ , snake_case__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized _snake_case , _snake_case : int = tf.nn.moments(snake_case__ , axes=[axis] , keepdims=snake_case__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis _snake_case : Optional[Any] = [1] * inputs.shape.rank _snake_case : Optional[Any] = shape_list(snake_case__ )[axis] _snake_case : Optional[int] = tf.reshape(snake_case__ , snake_case__ ) _snake_case : List[str] = tf.reshape(snake_case__ , snake_case__ ) # Compute layer normalization using the batch_normalization # function. _snake_case : Optional[int] = tf.nn.batch_normalization( snake_case__ , snake_case__ , snake_case__ , offset=snake_case__ , scale=snake_case__ , variance_epsilon=snake_case__ , ) return outputs def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Any=0 , snake_case__ : Tuple=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input _snake_case : Any = tf.shape(snake_case__ ) _snake_case : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) _snake_case : int = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : tf.Tensor ): """simple docstring""" if not isinstance(snake_case__ , tf.Tensor ): _snake_case : Optional[int] = tf.convert_to_tensor(snake_case__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: _snake_case : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: _snake_case : List[str] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) _snake_case : Optional[int] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def UpperCAmelCase__ (snake_case__ : tf.Tensor , snake_case__ : int , snake_case__ : str = "input_ids" ): """simple docstring""" tf.debugging.assert_less( snake_case__ , tf.cast(snake_case__ , dtype=tensor.dtype ) , message=( F"The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case__ )}) must be smaller than the embedding " F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) , ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Union[str, Any] = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. _snake_case : List[str] = [x for x in data if len(snake_case__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " F"bytes: {bad_attributes}" ) _snake_case : Optional[int] = np.asarray(snake_case__ ) _snake_case : Dict = 1 _snake_case : Dict = np.array_split(snake_case__ , snake_case__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 _snake_case : int = np.array_split(snake_case__ , snake_case__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case__ ): _snake_case : Any = chunk_data else: _snake_case : List[Any] = data def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" if name in group.attrs: _snake_case : Optional[Any] = [n.decode("""utf8""" ) if hasattr(snake_case__ , """decode""" ) else n for n in group.attrs[name]] else: _snake_case : List[str] = [] _snake_case : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(snake_case__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" def _expand_single_ad_tensor(snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case__ )
28
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A_ = input('''Enter image url: ''').strip() print(F'''Downloading image from {url} ...''') A_ = BeautifulSoup(requests.get(url).content, '''html.parser''') # The image URL is in the content field of the first meta tag with property og:image A_ = soup.find('''meta''', {'''property''': '''og:image'''})['''content'''] A_ = requests.get(image_url).content A_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg''' with open(file_name, '''wb''') as fp: fp.write(image_data) print(F'''Done. Image saved to disk as {file_name}.''')
28
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" return [ord(snake_case__ ) - 96 for elem in plain] def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" return "".join(chr(elem + 96 ) for elem in encoded ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , snake_case__ ) print("""Decoded:""" , decode(snake_case__ ) ) if __name__ == "__main__": main()
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
28
1
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class lowercase( nn.Module ): '''simple docstring''' lowercase__ = 42 lowercase__ = jnp.floataa def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[Any] = nn.Conv( self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__( self: List[Any], a_: Optional[int] ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = hidden_states.shape _snake_case : int = jax.image.resize( a_, shape=(batch, height * 2, width * 2, channels), method="""nearest""", ) _snake_case : str = self.conv(a_ ) return hidden_states class lowercase( nn.Module ): '''simple docstring''' lowercase__ = 42 lowercase__ = jnp.floataa def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = nn.Conv( self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__( self: str, a_: Any ): '''simple docstring''' _snake_case : int = self.conv(a_ ) return hidden_states class lowercase( nn.Module ): '''simple docstring''' lowercase__ = 42 lowercase__ = None lowercase__ = 0.0 lowercase__ = None lowercase__ = jnp.floataa def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels _snake_case : Dict = nn.GroupNorm(num_groups=32, epsilon=1E-5 ) _snake_case : Optional[Any] = nn.Conv( a_, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) _snake_case : Dict = nn.Dense(a_, dtype=self.dtype ) _snake_case : str = nn.GroupNorm(num_groups=32, epsilon=1E-5 ) _snake_case : Union[str, Any] = nn.Dropout(self.dropout_prob ) _snake_case : Optional[Any] = nn.Conv( a_, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) _snake_case : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _snake_case : int = None if use_nin_shortcut: _snake_case : Union[str, Any] = nn.Conv( a_, kernel_size=(1, 1), strides=(1, 1), padding="""VALID""", dtype=self.dtype, ) def __call__( self: List[Any], a_: Optional[int], a_: List[str], a_: str=True ): '''simple docstring''' _snake_case : int = hidden_states _snake_case : Union[str, Any] = self.norma(a_ ) _snake_case : int = nn.swish(a_ ) _snake_case : str = self.conva(a_ ) _snake_case : Any = self.time_emb_proj(nn.swish(a_ ) ) _snake_case : Any = jnp.expand_dims(jnp.expand_dims(a_, 1 ), 1 ) _snake_case : Tuple = hidden_states + temb _snake_case : Optional[Any] = self.norma(a_ ) _snake_case : str = nn.swish(a_ ) _snake_case : int = self.dropout(a_, a_ ) _snake_case : Dict = self.conva(a_ ) if self.conv_shortcut is not None: _snake_case : Dict = self.conv_shortcut(a_ ) return hidden_states + residual
28
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
28
1
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
28
1
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin A_ = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class lowercase( unittest.TestCase , __a ): '''simple docstring''' def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = load_tool("""text-question-answering""" ) self.tool.setup() _snake_case : Any = load_tool("""text-question-answering""", remote=a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : str = self.tool(a_, """What did Hugging Face do in April 2021?""" ) self.assertEqual(a_, """launched the BigScience Research Workshop""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = self.remote_tool(a_, """What did Hugging Face do in April 2021?""" ) self.assertEqual(a_, """launched the BigScience Research Workshop""" ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = self.tool(text=a_, question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(a_, """launched the BigScience Research Workshop""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = self.remote_tool(text=a_, question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(a_, """launched the BigScience Research Workshop""" )
28
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
28
1
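# Editorial illustration (hedged, not a dataset row): the conversion script
# above revolves around renaming dotted checkpoint keys. A minimal sketch of
# the segment-shaving plus substring-renaming idea, in plain Python:
def shave_segments(path: str, n_shave_prefix_segments: int = 1) -> str:
    # Drop leading segments (or trailing ones when the count is negative).
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    return ".".join(path.split(".")[:n_shave_prefix_segments])

old_key = "input_blocks.1.0.in_layers.0.weight"
new_key = shave_segments(old_key, 2).replace("in_layers.0", "norm1")
assert new_key == "0.norm1.weight"  # prefix trimmed, module renamed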
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record A_ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' A_ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' A_ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = 
datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : int ): """simple docstring""" return float((preds == labels).mean() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[Any]="binary" ): """simple docstring""" _snake_case : Any = simple_accuracy(snake_case__ , snake_case__ ) _snake_case : int = float(fa_score(y_true=snake_case__ , y_pred=snake_case__ , average=snake_case__ ) ) return { "accuracy": acc, "f1": fa, } def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : str ): """simple docstring""" _snake_case : Tuple = {} for id_pred, label in zip(snake_case__ , snake_case__ ): _snake_case : Tuple = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}" _snake_case : str = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _snake_case : Any = [(pred, label)] _snake_case , _snake_case : Dict = [], [] for question, preds_labels in question_map.items(): _snake_case , _snake_case : Union[str, Any] = zip(*snake_case__ ) _snake_case : int = fa_score(y_true=snake_case__ , y_pred=snake_case__ , average="""macro""" ) fas.append(snake_case__ ) _snake_case : Tuple = int(sum(pred == label for pred, label in preds_labels ) == len(snake_case__ ) ) ems.append(snake_case__ ) _snake_case : List[str] = float(sum(snake_case__ ) / len(snake_case__ ) ) _snake_case : Union[str, Any] = sum(snake_case__ ) / len(snake_case__ ) _snake_case : Union[str, Any] = float(fa_score(y_true=snake_case__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None, ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": 
datasets.Value("""int64""" ), } def UpperCamelCase_ ( self: int, a_: Tuple, a_: int ): '''simple docstring''' if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(a_, a_ )} elif self.config_name == "cb": return acc_and_fa(a_, a_, fa_avg="""macro""" ) elif self.config_name == "record": _snake_case : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] _snake_case : int = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(a_, a_ )[0] elif self.config_name == "multirc": return evaluate_multirc(a_, a_ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(a_, a_ )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
28
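# Editorial usage sketch (hedged): the SuperGLUE metric's accuracy/F1 helpers
# above reduce to a numpy comparison plus sklearn.metrics.f1_score, e.g.:
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
acc = float((preds == labels).mean())              # 0.75
f1 = float(f1_score(y_true=labels, y_pred=preds))  # binary F1 ~ 0.667
print({"accuracy": acc, "f1": round(f1, 3)})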
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
1
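# Reference sketch (hedged) of the multimode computation above: count each
# value, keep every value that reaches the maximum count, return them sorted.
def modes(values: list) -> list:
    if not values:
        return []
    counts = [values.count(v) for v in values]
    top = max(counts)
    return sorted({values[i] for i, c in enumerate(counts) if c == top})

assert modes([2, 2, 3, 3, 1]) == [2, 3]  # tie -> both modes are returned
assert modes([]) == []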
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class lowercase( __a ): '''simple docstring''' lowercase__ = None lowercase__ = None @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : str = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(a_, """feature_size""" ) ) self.assertTrue(hasattr(a_, """sampling_rate""" ) ) self.assertTrue(hasattr(a_, """padding_value""" ) ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : int = self.feat_extract_tester.prepare_inputs_for_common() _snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : List[str] = feat_extract.model_input_names[0] _snake_case : Dict = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_, processed_features[input_name] ) ) ) _snake_case : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) _snake_case : Optional[int] = BatchFeature({input_name: speech_inputs}, tensor_type="""np""" ) _snake_case : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: _snake_case : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) _snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Optional[int] = feat_extract.model_input_names[0] _snake_case : Dict = BatchFeature({input_name: speech_inputs}, tensor_type="""pt""" ) _snake_case : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: _snake_case : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) _snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : List[str] = feat_extract.model_input_names[0] _snake_case : List[str] = BatchFeature({input_name: speech_inputs}, tensor_type="""tf""" ) _snake_case : int = processed_features[input_name] if len(batch_features_input.shape ) < 3: _snake_case : Optional[int] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def UpperCamelCase_ ( self: List[str], a_: Union[str, Any]=False ): '''simple docstring''' def _inputs_have_equal_length(a_: Union[str, Any] ): _snake_case : Dict = len(input[0] ) for input_slice in input[1:]: if len(a_ ) != length: return False return True def _inputs_are_equal(a_: List[str], a_: int ): if len(a_ ) != len(a_ ): return False for input_slice_a, input_slice_a in zip(a_, a_ ): if not np.allclose(np.asarray(a_ ), np.asarray(a_ ), atol=1E-3 ): 
return False return True _snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ ) _snake_case : Dict = feat_extract.model_input_names[0] _snake_case : str = BatchFeature({input_name: speech_inputs} ) _snake_case : List[str] = self.feat_extract_tester.seq_length_diff _snake_case : List[Any] = self.feat_extract_tester.max_seq_length + pad_diff _snake_case : Optional[int] = self.feat_extract_tester.min_seq_length _snake_case : Any = self.feat_extract_tester.batch_size _snake_case : Any = self.feat_extract_tester.feature_size # test padding for List[int] + numpy _snake_case : Optional[int] = feat_extract.pad(a_, padding=a_ ) _snake_case : Union[str, Any] = input_a[input_name] _snake_case : str = feat_extract.pad(a_, padding="""longest""" ) _snake_case : List[str] = input_a[input_name] _snake_case : Optional[Any] = feat_extract.pad(a_, padding="""max_length""", max_length=len(speech_inputs[-1] ) ) _snake_case : str = input_a[input_name] _snake_case : Any = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" ) _snake_case : Optional[int] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(a_ ): feat_extract.pad(a_, padding="""max_length""" )[input_name] _snake_case : str = feat_extract.pad( a_, padding="""max_length""", max_length=a_, return_tensors="""np""" ) _snake_case : List[Any] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_are_equal(a_, a_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy _snake_case : str = feat_extract.pad(a_, pad_to_multiple_of=10 ) _snake_case : Union[str, Any] = input_a[input_name] _snake_case : Any = feat_extract.pad(a_, padding="""longest""", pad_to_multiple_of=10 ) _snake_case : List[str] = input_a[input_name] _snake_case : Union[str, Any] = feat_extract.pad( a_, padding="""max_length""", pad_to_multiple_of=10, max_length=a_ ) _snake_case : int = input_a[input_name] _snake_case : str = feat_extract.pad( a_, padding="""max_length""", pad_to_multiple_of=10, max_length=a_, return_tensors="""np""", ) _snake_case : Tuple = input_a[input_name] self.assertTrue(all(len(a_ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(a_, a_ ) ) _snake_case : Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(a_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2], (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct _snake_case : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - 
pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, Any]=False ): '''simple docstring''' def _inputs_have_equal_length(a_: Union[str, Any] ): _snake_case : Union[str, Any] = len(input[0] ) for input_slice in input[1:]: if len(a_ ) != length: return False return True def _inputs_are_equal(a_: Tuple, a_: Any ): if len(a_ ) != len(a_ ): return False for input_slice_a, input_slice_a in zip(a_, a_ ): if not np.allclose(np.asarray(a_ ), np.asarray(a_ ), atol=1E-3 ): return False return True _snake_case : Any = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ ) _snake_case : List[str] = feat_extract.model_input_names[0] _snake_case : List[str] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest _snake_case : Tuple = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[0] ), truncation=a_ ) _snake_case : Optional[int] = input_a[input_name] _snake_case : Tuple = feat_extract.pad(a_, padding="""max_length""", max_length=len(speech_inputs[0] ) ) _snake_case : Tuple = input_a[input_name] self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertFalse(_inputs_have_equal_length(a_ ) ) # truncate to smallest with np _snake_case : List[Any] = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[0] ), return_tensors="""np""", truncation=a_, ) _snake_case : List[Any] = input_a[input_name] _snake_case : List[str] = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[0] ), return_tensors="""np""" ) _snake_case : Any = input_a[input_name] self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(a_ ) ) # truncate to middle _snake_case : Tuple = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[1] ), truncation=a_, return_tensors="""np""", ) _snake_case : Dict = input_a[input_name] _snake_case : int = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[1] ), truncation=a_ ) _snake_case : List[Any] = input_a[input_name] _snake_case : Any = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[1] ), return_tensors="""np""" ) _snake_case : List[Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_are_equal(a_, a_ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(a_ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(a_ ): feat_extract.pad(a_, truncation=a_ )[input_name] # padding has to be max_length 
when setting `truncation=True` with self.assertRaises(a_ ): feat_extract.pad(a_, padding="""longest""", truncation=a_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(a_ ): feat_extract.pad(a_, padding="""longest""", truncation=a_ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(a_ ): feat_extract.pad(a_, padding="""max_length""", truncation=a_ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy _snake_case : int = 12 _snake_case : Any = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[0] ), pad_to_multiple_of=a_, truncation=a_, ) _snake_case : Optional[int] = input_a[input_name] _snake_case : Tuple = feat_extract.pad( a_, padding="""max_length""", max_length=len(speech_inputs[0] ), pad_to_multiple_of=a_, ) _snake_case : List[Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of _snake_case : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: _snake_case : List[str] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertFalse(_inputs_have_equal_length(a_ ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' self._check_padding(numpify=a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self._check_padding(numpify=a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self._check_truncation(numpify=a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self._check_truncation(numpify=a_ ) @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Dict = self.feat_extract_tester.prepare_inputs_for_common() _snake_case : Any = feat_extract.model_input_names[0] _snake_case : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) _snake_case : List[Any] = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" )[input_name] _snake_case : Tuple = feat_extract.pad(a_, padding="""longest""", return_tensors="""pt""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() _snake_case : int = feat_extract.model_input_names[0] _snake_case : Tuple = BatchFeature({input_name: speech_inputs} ) _snake_case : Optional[Any] = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" )[input_name] _snake_case : Tuple = feat_extract.pad(a_, padding="""longest""", return_tensors="""tf""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.feat_extract_dict _snake_case : List[Any] = True _snake_case : Optional[int] = self.feature_extraction_class(**a_ ) _snake_case : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() _snake_case : List[str] = [len(a_ ) for x in speech_inputs] _snake_case : int = feat_extract.model_input_names[0] _snake_case : Any = 
BatchFeature({input_name: speech_inputs} ) _snake_case : List[str] = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" ) self.assertIn("""attention_mask""", a_ ) self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.feat_extract_dict _snake_case : Dict = True _snake_case : str = self.feature_extraction_class(**a_ ) _snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() _snake_case : Optional[int] = [len(a_ ) for x in speech_inputs] _snake_case : int = feat_extract.model_input_names[0] _snake_case : Any = BatchFeature({input_name: speech_inputs} ) _snake_case : List[Any] = min(a_ ) _snake_case : Optional[Any] = feat_extract.pad( a_, padding="""max_length""", max_length=a_, truncation=a_, return_tensors="""np""" ) self.assertIn("""attention_mask""", a_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
28
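# Hedged arithmetic sketch of the `pad_to_multiple_of` rule the tests above
# assert: sequence lengths are rounded up to the next multiple, never down.
def padded_length(seq_len: int, multiple: int) -> int:
    if seq_len % multiple == 0:
        return seq_len
    return (seq_len // multiple + 1) * multiple

assert padded_length(20, 10) == 20
assert padded_length(23, 10) == 30  # 23 rounds up to the next multiple of 10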
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
28
1
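# Hedged sketch of the nested-config pattern above, with hypothetical
# standalone classes (not the transformers API): a composite config builds
# sub-configs from dicts and falls back to defaults when they are None.
class TextCfg:
    def __init__(self, vocab_size=50_265, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    def to_dict(self):
        return {"vocab_size": self.vocab_size, "hidden_size": self.hidden_size}

class ComposedCfg:
    def __init__(self, text_config=None, **kwargs):
        # Mirrors the `text_config is None -> {}` fallback in the row above.
        self.text_config = TextCfg(**(text_config or {}))

cfg = ComposedCfg()
assert cfg.text_config.to_dict() == {"vocab_size": 50_265, "hidden_size": 768}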
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. A_ = abspath(join(dirname(__file__), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" config.addinivalue_line( """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" ) config.addinivalue_line( """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" ) config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" ) config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" ) config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" ) config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" ) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main _snake_case : Union[str, Any] = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(snake_case__ , id=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict ): """simple docstring""" if exitstatus == 5: _snake_case : Tuple = 0 # Doctest custom flag to ignore output. A_ = doctest.register_optionflag('''IGNORE_RESULT''') A_ = doctest.OutputChecker class lowercase( __a ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple, a_: Dict, a_: Any, a_: Tuple ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, a_, a_, a_ ) A_ = CustomOutputChecker A_ = HfDoctestModule A_ = HfDocTestParser
28
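# Standard-library sketch (hedged) of the custom doctest flag mechanism used
# in the conftest above: register a flag, then short-circuit output checking
# whenever a doctest opts in with it. register_optionflag is idempotent, so
# re-registering the same name is safe.
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoringOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept any output for opted-in examples
        return super().check_output(want, got, optionflags)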
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
28
1
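# Hedged sketch of the q/v bias stitching step in the conversion above: the
# source checkpoint keeps q_bias and v_bias separately, while the target
# layer expects one fused qkv bias with a zero block for k.
import torch

hidden = 4
q_bias = torch.ones(hidden)
v_bias = torch.full((hidden,), 2.0)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
assert qkv_bias.shape == (3 * hidden,)
assert qkv_bias[hidden:2 * hidden].sum().item() == 0.0  # k bias stays zero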
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A_ = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowercase( __a ): '''simple docstring''' def __init__( self: int, *a_: Optional[Any], **a_: Dict ): '''simple docstring''' super().__init__(*a_, **a_ ) requires_backends(self, """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def UpperCamelCase_ ( self: List[str], a_: List[str]=None ): '''simple docstring''' _snake_case : Union[str, Any] = {} if top_k is not None: _snake_case : Optional[int] = top_k return {}, {}, postprocess_params def __call__( self: Union[str, Any], a_: Union[str, List[str], "Image.Image", List["Image.Image"]], **a_: Tuple ): '''simple docstring''' return super().__call__(a_, **a_ ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int] ): '''simple docstring''' _snake_case : Any = load_image(a_ ) _snake_case : Dict = self.image_processor(images=a_, return_tensors=self.framework ) return model_inputs def UpperCamelCase_ ( self: Dict, a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = self.model(**a_ ) return model_outputs def UpperCamelCase_ ( self: Any, a_: Dict, a_: str=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: _snake_case : int = self.model.config.num_labels if self.framework == "pt": _snake_case : Tuple = model_outputs.logits.softmax(-1 )[0] _snake_case , _snake_case : str = probs.topk(a_ ) elif self.framework == "tf": _snake_case : int = stable_softmax(model_outputs.logits, axis=-1 )[0] _snake_case : Tuple = tf.math.top_k(a_, k=a_ ) _snake_case , _snake_case : List[str] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"Unsupported framework: {self.framework}" ) _snake_case : Optional[int] = scores.tolist() _snake_case : int = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_ )]
28
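# Illustrative sketch (hedged) of the PyTorch postprocess branch above:
# softmax over the logits, then top-k, then pairing scores with labels.
import torch

logits = torch.tensor([[2.0, 0.5, 1.0]])
id2label = {0: "cat", 1: "dog", 2: "bird"}
probs = logits.softmax(-1)[0]
scores, ids = probs.topk(2)
results = [
    {"score": score.item(), "label": id2label[idx.item()]}
    for score, idx in zip(scores, ids)
]
# -> [{'score': ~0.63, 'label': 'cat'}, {'score': ~0.23, 'label': 'bird'}]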
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
28
1
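A minimal sketch of driving the MobileViTV2 conversion entry point above directly from Python, using the function name its own __main__ block calls; the checkpoint, config, and output paths are hypothetical placeholders, not real files:

# Hypothetical paths; substitute a real MobileViTV2 checkpoint (.pt) and its
# yaml config. The task name must be one of the argparse choices above.
convert_mobilevitva_checkpoint(
    "imagenet1k_256",
    "./mobilevitv2-1.0.pt",
    "./mobilevitv2.yaml",
    "./mobilevitv2-1.0-imagenet1k-256",
)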
"""simple docstring""" # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowercase( __a ): '''simple docstring''' def __init__( self: str, a_: List[Any], a_: Any ): '''simple docstring''' super().__init__() self.register_modules(unet=a_, scheduler=a_ ) @torch.no_grad() def __call__( self: str, a_: int = 1, a_: Optional[torch.Generator] = None, a_: int = 50, a_: Optional[str] = "pil", a_: bool = True, **a_: str, ): '''simple docstring''' _snake_case : List[str] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, ) _snake_case : Union[str, Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(a_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _snake_case : Union[str, Any] = self.unet(a_, a_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _snake_case : Dict = self.scheduler.step(a_, a_, a_ ).prev_sample _snake_case : Optional[int] = (image / 2 + 0.5).clamp(0, 1 ) _snake_case : Any = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": _snake_case : Union[str, Any] = self.numpy_to_pil(a_ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=a_ ), "This is a local test"
28
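The class above mirrors diffusers' DDIM-style unconditional image pipeline; a usage sketch against the equivalent upstream class (the model id is an assumption, any unconditional UNet2DModel checkpoint would do):

from diffusers import DDIMPipeline

# Hypothetical checkpoint; sampling follows the same denoising loop as above.
pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("sample.png")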
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
28
1
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 A_ = get_tests_dir('''fixtures''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[Any] = mock.Mock() _snake_case : List[Any] = 500 _snake_case : List[str] = {} _snake_case : List[Any] = HTTPError _snake_case : Optional[int] = {} # Download this model to make sure it's in the cache. _snake_case : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""", return_value=a_ ) as mock_head: _snake_case : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Optional[int] = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' with self.assertRaises(a_ ): # config is in subfolder, the following should not work without specifying the subfolder _snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""", subfolder="""feature_extractor""" ) self.assertIsNotNone(a_ ) @is_staging_test class lowercase( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = TOKEN HfFolder.save_token(a_ ) @classmethod def UpperCamelCase_ ( cls: Optional[int] ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-image-processor""", use_auth_token=self._token ) _snake_case : Tuple = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""test-image-processor""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: Optional[int] ): 
'''simple docstring''' _snake_case : Optional[int] = ViTImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""valid_org/test-image-processor""", use_auth_token=self._token ) _snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""valid_org/test-image-processor-org""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : List[Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: str ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _snake_case : List[str] = CustomImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-dynamic-image-processor""", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""}, ) _snake_case : Optional[int] = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=a_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, """CustomImageProcessor""" )
28
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
1
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[list[str]] , snake_case__ : int , ): """simple docstring""" _snake_case : List[str] = len(snake_case__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(snake_case__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , snake_case__ , snake_case__ , ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : list[list[str]] = [] depth_first_search([] , [] , [] , snake_case__ , snake_case__ ) # Print all the boards for board in boards: for column in board: print(snake_case__ ) print("""""" ) print(len(snake_case__ ) , """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
28
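A tiny worked check of the diagonal bookkeeping the comments above describe: two queens attack diagonally exactly when they share row - col (the 45º id) or row + col (the 135º id):

# (row, col) placements chosen for illustration.
q1, q2 = (0, 1), (2, 3)
assert q1[0] - q1[1] == q2[0] - q2[1] == -1   # same 45º diagonal -> collision
assert q1[0] + q1[1] != q2[0] + q2[1]         # 135º ids differ (1 vs 5)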
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
28
1
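The benchmark class above is normally driven through its public wrapper; a sketch following the documented pattern (model name and sizes are illustrative, and note these benchmark utilities are deprecated in recent transformers releases):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32, 128]
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # reports inference speed and, if enabled, memory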
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] _snake_case : int = DisjunctiveConstraint(a_ ) self.assertTrue(isinstance(dc.token_ids, a_ ) ) with self.assertRaises(a_ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(a_ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(a_ ): DisjunctiveConstraint(a_ ) # fails here def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = [[1, 2, 3], [1, 2, 4]] _snake_case : Union[str, Any] = DisjunctiveConstraint(a_ ) _snake_case , _snake_case , _snake_case : Optional[Any] = dc.update(1 ) _snake_case : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(a_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _snake_case , _snake_case , _snake_case : Union[str, Any] = dc.update(2 ) _snake_case : Union[str, Any] = stepped is True and completed is False and reset is False self.assertTrue(a_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _snake_case , _snake_case , _snake_case : Dict = dc.update(3 ) _snake_case : Optional[Any] = stepped is True and completed is True and reset is False self.assertTrue(a_ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] _snake_case : Union[str, Any] = DisjunctiveConstraint(a_ ) _snake_case , _snake_case , _snake_case : Any = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _snake_case , _snake_case , _snake_case : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _snake_case , _snake_case , _snake_case : Union[str, Any] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) _snake_case , _snake_case , _snake_case : str = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() _snake_case , _snake_case , _snake_case : Optional[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) _snake_case , _snake_case , _snake_case : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) _snake_case , _snake_case , _snake_case : Optional[Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
28
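Beyond the unit tests above, the constraint is meant to be passed to constrained beam search; a sketch under the documented generate API (the model choice and forced phrases are illustrative assumptions):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tok = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# Force the output to contain at least one of these phrases.
phrases = [tok("nett", add_special_tokens=False).input_ids,
           tok("angenehm", add_special_tokens=False).input_ids]
out = model.generate(
    **tok("translate English to German: That is kind.", return_tensors="pt"),
    constraints=[DisjunctiveConstraint(phrases)],
    num_beams=4,  # constrained decoding requires beam search
)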
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
28
1
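A worked run of the greedy routine above with the script's default Indian denominations. Greedy is optimal here because the denomination system is canonical; it can fail for non-canonical systems (e.g. denominations [1, 3, 4] and value 6 greedily give 4 + 1 + 1 instead of 3 + 3):

answer = find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]  (sums to 987)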
"""simple docstring""" from datetime import datetime as dt import os from github import Github A_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Github(os.environ["""GITHUB_TOKEN"""] ) _snake_case : List[str] = g.get_repo("""huggingface/transformers""" ) _snake_case : Any = repo.get_issues(state="""open""" ) for issue in open_issues: _snake_case : Tuple = sorted([comment for comment in issue.get_comments()] , key=lambda snake_case__ : i.created_at , reverse=snake_case__ ) _snake_case : int = comments[0] if len(snake_case__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
28
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
28
1
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
28
1
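The scheduler tests above repeat one core pattern: save the config, reload it, and check that `step` is bit-for-bit reproducible. A stripped-down sketch of that round trip, assuming only diffusers' public `IPNDMScheduler` API:

import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = IPNDMScheduler.from_pretrained(tmpdir)
reloaded.set_timesteps(50)

sample = torch.randn(1, 3, 8, 8)
residual = 0.1 * sample
t = scheduler.timesteps[len(scheduler.timesteps) // 2]

# both schedulers start from the same (empty) residual history, so the
# outputs must agree up to floating-point noise
out_a = scheduler.step(residual, t, sample).prev_sample
out_b = reloaded.step(residual, t, sample).prev_sample
assert torch.sum(torch.abs(out_a - out_b)) < 1e-5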
"""simple docstring""" class lowercase: '''simple docstring''' def __init__( self: Union[str, Any], a_: Tuple ): '''simple docstring''' _snake_case : List[str] = val _snake_case : List[Any] = None _snake_case : Optional[Any] = None def UpperCamelCase_ ( self: int, a_: List[str] ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: _snake_case : List[Any] = Node(a_ ) else: self.left.insert(a_ ) elif val > self.val: if self.right is None: _snake_case : Any = Node(a_ ) else: self.right.insert(a_ ) else: _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" if root: inorder(root.left , snake_case__ ) res.append(root.val ) inorder(root.right , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" if len(snake_case__ ) == 0: return arr _snake_case : List[str] = Node(arr[0] ) for i in range(1 , len(snake_case__ ) ): root.insert(arr[i] ) # Traverse BST in order. _snake_case : List[Any] = [] inorder(snake_case__ , snake_case__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
28
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
28
1
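The search above is Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite that cannot be written as a prime plus twice a square. A cleaned-up sketch of the same search, since the variable renaming above obscures the control flow:

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are primes
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # all other primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def first_counterexample(limit: int = 10_001) -> int:
    for n in range(9, limit, 2):  # odd numbers, starting at the first odd composite
        if is_prime(n):
            continue
        if not any(is_prime(n - 2 * i * i) for i in range(1, int(math.sqrt(n // 2)) + 1)):
            return n
    raise ValueError("no counterexample below limit")

print(first_counterexample())  # 5777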
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''', }, '''merges_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''Salesforce/codegen-350M-mono''': ( '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json''' ), }, } A_ = { '''Salesforce/codegen-350M-mono''': 20_48, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] lowercase__ = CodeGenTokenizer def __init__( self: str, a_: List[str]=None, a_: str=None, a_: Any=None, a_: List[Any]="<|endoftext|>", a_: int="<|endoftext|>", a_: List[Any]="<|endoftext|>", a_: int=False, **a_: List[Any], ): '''simple docstring''' super().__init__( a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, ) if kwargs.pop("""add_bos_token""", a_ ): _snake_case : Union[str, Any] = kwargs.pop("""name_or_path""", """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) _snake_case : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space: _snake_case : str = getattr(a_, pre_tok_state.pop("""type""" ) ) _snake_case : Dict = add_prefix_space _snake_case : List[Any] = pre_tok_class(**a_ ) _snake_case : List[str] = add_prefix_space def UpperCamelCase_ ( self: List[Any], *a_: Tuple, **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[int], *a_: Dict, **a_: Optional[int] ): '''simple docstring''' _snake_case : int = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : int = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ ) def UpperCamelCase_ ( self: List[str], a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: Optional[int], ): '''simple docstring''' _snake_case : str = super().decode( token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, ) if truncate_before_pattern is not None and len(a_ ) > 0: _snake_case : int = self.truncate(a_, a_ ) return decoded_text def UpperCamelCase_ ( self: List[Any], a_: List[str], a_: str ): '''simple docstring''' def find_re(a_: Union[str, Any], a_: List[Any], a_: List[str] ): _snake_case : str = pattern.search(a_, a_ ) return m.start() if m else -1 _snake_case : Optional[int] = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern] _snake_case : List[str] = list(re.finditer("""^print""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : List[str] = completion[: prints[1].start()] _snake_case : List[Any] = list(re.finditer("""^def""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : Optional[int] = completion[: defs[1].start()] _snake_case : str = 0 _snake_case : str = [ pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1 ] if len(a_ ) > 0: return completion[: min(a_ )] else: return completion
28
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
28
1
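The `truncate` helper in the tokenizer above stops decoding at the second top-level `print` or `def` and at the earliest match of any caller-supplied pattern. A standalone approximation of that logic (simplified: it drops the decode integration and the start-offset bookkeeping):

import re

def truncate(completion: str, truncate_before_pattern: list) -> str:
    # cut at the second top-level `print`, then at the second top-level `def`
    for anchor in ("^print", "^def"):
        hits = list(re.finditer(anchor, completion, re.MULTILINE))
        if len(hits) > 1:
            completion = completion[: hits[1].start()]
    # then cut at the earliest match of any caller-supplied pattern
    starts = [
        m.start()
        for m in (re.compile(p, re.MULTILINE).search(completion) for p in truncate_before_pattern)
        if m
    ]
    return completion[: min(starts)] if starts else completion

generated = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))\ndef main():\n    pass\n"
print(truncate(generated, [r"^print"]))  # keeps only the first function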
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : str ): """simple docstring""" _snake_case , _snake_case : List[str] = set(snake_case__ ), [start] while stack: _snake_case : Dict = stack.pop() explored.add(snake_case__ ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(snake_case__ ) return explored A_ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
28
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
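A quick property check of the Kernighan trick used in the bit-count sample above: clearing the lowest set bit once per iteration must agree with Python's own binary representation for any non-negative input.

def popcount(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        count += 1
    return count

for n in (0, 1, 0b1011, 2**31 - 1, 123_456_789):
    assert popcount(n) == bin(n).count("1")
print(popcount(0b1011))  # 3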
"""simple docstring""" import argparse from collections import defaultdict import yaml A_ = '''docs/source/en/_toctree.yml''' def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : Any = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 _snake_case : Any = [key for key, value in counts.items() if value > 1] _snake_case : Optional[Any] = [] for duplicate_key in duplicates: _snake_case : Dict = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def UpperCAmelCase__ (snake_case__ : Optional[int]=False ): """simple docstring""" with open(snake_case__ , encoding="""utf-8""" ) as f: _snake_case : Union[str, Any] = yaml.safe_load(f.read() ) # Get to the API doc _snake_case : int = 0 while content[api_idx]["title"] != "API": api_idx += 1 _snake_case : List[Any] = content[api_idx]["""sections"""] # Then to the model doc _snake_case : Optional[int] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 _snake_case : List[str] = api_doc[model_idx]["""sections"""] _snake_case : List[str] = [(idx, section) for idx, section in enumerate(snake_case__ ) if """sections""" in section] _snake_case : Optional[Any] = False for idx, modality_doc in modalities_docs: _snake_case : int = modality_doc["""sections"""] _snake_case : Tuple = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: _snake_case : Optional[Any] = True if overwrite: _snake_case : Optional[int] = new_modality_doc if diff: if overwrite: _snake_case : Optional[int] = model_doc _snake_case : Union[str, Any] = api_doc with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A_ = parser.parse_args() check_model_doc(args.fix_and_overwrite)
28
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
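The `_toctree.yml` checker above collapses entries that share a `local` key, refuses conflicting titles, and re-sorts by title (note that the renaming above garbles the sort lambda: the parameter is `snake_case__` but the body uses `s`). A self-contained sketch of that cleaning pass on a toy fragment; the entries are invented for illustration:

from collections import defaultdict

def clean_model_doc_toc(model_doc: list) -> list:
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1

    new_doc, seen = [], set()
    for doc in model_doc:
        key = doc["local"]
        if counts[key] == 1:
            new_doc.append(doc)
        elif key not in seen:
            titles = {d["title"] for d in model_doc if d["local"] == key}
            if len(titles) > 1:
                raise ValueError(f"{key} is present several times with different titles: {titles}")
            seen.add(key)
            new_doc.append({"local": key, "title": titles.pop()})
    return sorted(new_doc, key=lambda d: d["title"].lower())

toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate to be collapsed
]
print(clean_model_doc_toc(toc))  # ALBERT first, a single BERT entry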
"""simple docstring""" from typing import Any class lowercase: '''simple docstring''' def __init__( self: Union[str, Any], a_: Any ): '''simple docstring''' _snake_case : int = data _snake_case : List[str] = None class lowercase: '''simple docstring''' def __init__( self: Dict ): '''simple docstring''' _snake_case : str = None def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : str = self.head while temp is not None: print(temp.data, end=""" """ ) _snake_case : List[Any] = temp.next print() def UpperCamelCase_ ( self: Optional[int], a_: Any ): '''simple docstring''' _snake_case : Dict = Node(a_ ) _snake_case : str = self.head _snake_case : List[Any] = new_node def UpperCamelCase_ ( self: Optional[Any], a_: Any, a_: Union[str, Any] ): '''simple docstring''' if node_data_a == node_data_a: return else: _snake_case : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: _snake_case : Optional[Any] = node_a.next _snake_case : str = self.head while node_a is not None and node_a.data != node_data_a: _snake_case : Union[str, Any] = node_a.next if node_a is None or node_a is None: return _snake_case , _snake_case : Dict = node_a.data, node_a.data if __name__ == "__main__": A_ = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('''After swapping''') ll.print_list()
28
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
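A minimal round trip of the `ParquetDatasetWriter`/`ParquetDatasetReader` pair exercised in the tests above, assuming only the datasets API shown there (the file name is arbitrary):

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

dataset = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})

# write() returns a positive byte count, hence the `write() > 0` assertions above
assert ParquetDatasetWriter(dataset, "foo.parquet").write() > 0

reloaded = ParquetDatasetReader("foo.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]
assert reloaded.features == dataset.features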
"""simple docstring""" from jiwer import compute_measures import datasets A_ = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' A_ = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' A_ = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self: Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { """predictions""": datasets.Value("""string""", id="""sequence""" ), """references""": datasets.Value("""string""", id="""sequence""" ), } ), codebase_urls=["""https://github.com/jitsi/jiwer/"""], reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ], ) def UpperCamelCase_ ( self: Optional[int], a_: Any=None, a_: int=None, a_: str=False ): '''simple docstring''' if concatenate_texts: return compute_measures(a_, a_ )["wer"] else: _snake_case : int = 0 _snake_case : int = 0 for prediction, reference in zip(a_, a_ ): _snake_case : Optional[int] = compute_measures(a_, a_ ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
28
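A worked instance of the formula in the metric description above, WER = (S + D + I) / (S + D + C), computed with jiwer exactly as the iterative branch of the metric does:

from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect = total = 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)  # ground truth first
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5, matching the docstring example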
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
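# --- Note on the hidden-states check above (our own remark) ---
# ConvNeXt's patchify stem downsamples the input by a stride of 4, which is
# why the test expects hidden_states[0] to be (image_size // 4) per side:
image_size, stem_stride = 32, 4
assert image_size // stem_stride == 8  # spatial size of the first feature map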
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers A_ = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = os.path.dirname(os.path.realpath(snake_case__ ) ) _snake_case : Any = os.path.join(snake_case__ , """words.txt""" ) _snake_case : Any = """""" with open(snake_case__ ) as f: _snake_case : Any = f.readline() _snake_case : Union[str, Any] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] _snake_case : int = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def UpperCAmelCase__ (snake_case__ : Namespace ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) A_ = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class lowercase( __a ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' _snake_case : Union[str, Any] = parser.add_parser( """convert""", help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""", ) train_parser.add_argument("""--model_type""", type=a_, required=a_, help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""", type=a_, required=a_, help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""", type=a_, required=a_, help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""", type=a_, default="""""", help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""", type=a_, default=a_, help="""Optional fine-tuning task name if the TF model was a finetuned model.""", ) train_parser.set_defaults(func=a_ ) def __init__( self: Any, a_: str, a_: str, a_: str, a_: str, a_: str, *a_: int, ): '''simple docstring''' _snake_case : str = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(f"Loading model {model_type}" ) _snake_case : Any = model_type _snake_case : Union[str, Any] = tf_checkpoint _snake_case : List[Any] = pytorch_dump_output _snake_case : List[Any] = config _snake_case : Optional[int] = finetuning_task_name def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(a_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from 
..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) if "ckpt" in self._tf_checkpoint.lower(): _snake_case : Tuple = self._tf_checkpoint _snake_case : Any = """""" else: _snake_case : Any = self._tf_checkpoint _snake_case : Dict = """""" convert_transfo_xl_checkpoint_to_pytorch( a_, self._config, self._pytorch_dump_output, a_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
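# --- Example invocation (illustrative; all paths are placeholders) ---
# The command registered above is reached through the `transformers-cli`
# entry point, e.g. for a BERT checkpoint:
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin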
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class lowercase( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any], a_: Union[str, Any], a_: Tuple=7, a_: List[Any]=3, a_: int=30, a_: Dict=400, a_: str=True, a_: Optional[int]=None, a_: int=True, a_: Optional[Any]=[0.5, 0.5, 0.5], a_: Any=[0.5, 0.5, 0.5], a_: List[Any]=True, a_: Union[str, Any]=1 / 255, a_: List[Any]=True, ): '''simple docstring''' _snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : str = num_channels _snake_case : List[Any] = min_resolution _snake_case : Tuple = max_resolution _snake_case : Union[str, Any] = do_resize _snake_case : List[str] = size _snake_case : Union[str, Any] = do_normalize _snake_case : List[Any] = image_mean _snake_case : List[str] = image_std _snake_case : Optional[int] = do_rescale _snake_case : List[str] = rescale_factor _snake_case : List[Any] = do_pad def UpperCamelCase_ ( self: int ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self: Any, a_: Any, a_: Tuple=False ): '''simple docstring''' if not batched: _snake_case : int = image_inputs[0] if isinstance(a_, Image.Image ): _snake_case , _snake_case : Optional[int] = image.size else: _snake_case , _snake_case : Dict = image.shape[1], image.shape[2] if w < h: _snake_case : List[Any] = int(self.size["""shortest_edge"""] * h / w ) _snake_case : str = self.size["""shortest_edge"""] elif w > h: _snake_case : Any = self.size["""shortest_edge"""] _snake_case : Dict = int(self.size["""shortest_edge"""] * w / h ) else: _snake_case : Dict = self.size["""shortest_edge"""] _snake_case : Dict = self.size["""shortest_edge"""] else: _snake_case : Union[str, Any] = [] for image in image_inputs: _snake_case , _snake_case : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _snake_case : List[Any] = max(a_, key=lambda a_ : item[0] )[0] _snake_case : Union[str, Any] = max(a_, key=lambda a_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Optional[int] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_, """image_mean""" ) ) self.assertTrue(hasattr(a_, """image_std""" ) ) self.assertTrue(hasattr(a_, """do_normalize""" ) ) self.assertTrue(hasattr(a_, """do_resize""" ) ) 
self.assertTrue(hasattr(a_, """size""" ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad, a_ ) _snake_case : Dict = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=a_ ) self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad, a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_, Image.Image ) # Test not batched input _snake_case : Optional[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values _snake_case , _snake_case : Dict = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched _snake_case , _snake_case : Dict = self.image_processor_tester.get_expected_values(a_, batched=a_ ) _snake_case : Any = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_, np.ndarray ) # Test not batched input _snake_case : Any = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values _snake_case , _snake_case : List[Any] = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched _snake_case : Tuple = image_processing(a_, return_tensors="""pt""" ).pixel_values _snake_case , _snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(a_, batched=a_ ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_, torch.Tensor ) # Test not batched input _snake_case : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values _snake_case , _snake_case : str = self.image_processor_tester.get_expected_values(a_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched _snake_case : List[str] = image_processing(a_, return_tensors="""pt""" ).pixel_values _snake_case , 
_snake_case : List[str] = self.image_processor_tester.get_expected_values(a_, batched=a_ ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) _snake_case : Dict = self.image_processing_class(do_resize=a_, do_normalize=a_, do_rescale=a_ ) # create random PyTorch tensors _snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors _snake_case : int = image_processing_a.pad(a_, return_tensors="""pt""" ) _snake_case : int = image_processing_a(a_, return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""], encoded_images["""pixel_values"""], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""" ) as f: _snake_case : int = json.loads(f.read() ) _snake_case : int = {"""image_id""": 39_769, """annotations""": target} # encode them _snake_case : Optional[Any] = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) _snake_case : int = image_processing(images=a_, annotations=a_, return_tensors="""pt""" ) # verify pixel values _snake_case : Any = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape, a_ ) _snake_case : int = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], a_, atol=1E-4 ) ) # verify area _snake_case : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], a_ ) ) # verify boxes _snake_case : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, a_ ) _snake_case : Union[str, Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], a_, atol=1E-3 ) ) # verify image_id _snake_case : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], a_ ) ) # verify is_crowd _snake_case : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], a_ ) ) # verify class_labels _snake_case : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], a_ ) ) # verify orig_size _snake_case : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], a_ ) ) # verify size _snake_case : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], a_ ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""" ) as f: _snake_case : Any = json.loads(f.read() ) 
_snake_case : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} _snake_case : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them _snake_case : List[Any] = YolosImageProcessor(format="""coco_panoptic""" ) _snake_case : str = image_processing(images=a_, annotations=a_, masks_path=a_, return_tensors="""pt""" ) # verify pixel values _snake_case : Union[str, Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape, a_ ) _snake_case : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], a_, atol=1E-4 ) ) # verify area _snake_case : Any = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], a_ ) ) # verify boxes _snake_case : int = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, a_ ) _snake_case : str = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], a_, atol=1E-3 ) ) # verify image_id _snake_case : int = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], a_ ) ) # verify is_crowd _snake_case : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], a_ ) ) # verify class_labels _snake_case : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], a_ ) ) # verify masks _snake_case : Tuple = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), a_ ) # verify orig_size _snake_case : Optional[int] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], a_ ) ) # verify size _snake_case : List[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], a_ ) )
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
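# --- Note on the expected ids in the tests above (our own sketch) ---
# ByT5 has no learned vocabulary: a token id is simply the UTF-8 byte value
# offset by the 3 special tokens (pad=0, eos=1, unk=2). That is why "U"
# (byte 85) encodes to 88, " " (byte 32) to 35, and the Euro sign's three
# UTF-8 bytes (0xE2 0x82 0xAC) to 229, 133, 175.
for char, token_id in [("U", 88), (" ", 35), (".", 49)]:
    assert ord(char) + 3 == token_id
assert [b + 3 for b in "€".encode("utf-8")] == [229, 133, 175]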
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = parse_args() # Import training_script as a module. _snake_case : int = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _snake_case : List[str] = script_fpath.stem _snake_case : Any = importlib.import_module(snake_case__ ) # Patch sys.argv _snake_case : int = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) _snake_case : Any = str(bin(snake_case__ ) ) binary_number += "0" * shift_amount return binary_number def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) _snake_case : Any = str(bin(snake_case__ ) )[2:] if shift_amount >= len(snake_case__ ): return "0b0" _snake_case : List[Any] = binary_number[: len(snake_case__ ) - shift_amount] return "0b" + shifted_binary_number def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" if number >= 0: # Get binary representation of positive number _snake_case : Dict = """0""" + str(bin(snake_case__ ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number _snake_case : List[str] = len(bin(snake_case__ )[3:] ) # Find 2's complement of number _snake_case : List[Any] = bin(abs(snake_case__ ) - (1 << binary_number_length) )[3:] _snake_case : str = ( """1""" + """0""" * (binary_number_length - len(snake_case__ )) + binary_number ) if shift_amount >= len(snake_case__ ): return "0b" + binary_number[0] * len(snake_case__ ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(snake_case__ ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: str, a_: List[Any] ): '''simple docstring''' self.assertEqual(len(a_ ), len(a_ ) ) for a, b in zip(a_, a_ ): self.assertAlmostEqual(a_, a_, delta=a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(a_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step, 3 ) self.assertEqual(len(accumulator.gradients ), 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2 ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = None ops.enable_eager_execution_internal() _snake_case : Any = tf.config.list_physical_devices("""CPU""" ) if len(a_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _snake_case : Any = tf.config.list_logical_devices(device_type="""CPU""" ) _snake_case : str = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _snake_case : Union[str, Any] = GradientAccumulator() _snake_case : str = tf.Variable([4.0, 3.0] ) _snake_case , _snake_case : str = create_optimizer(5E-5, 10, 5 ) _snake_case : int = tf.Variable([0.0, 0.0], trainable=a_ ) def accumulate_on_replica(a_: Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) ) @tf.function def accumulate(a_: Optional[Any], a_: str ): with strategy.scope(): _snake_case : List[str] = strategy.experimental_local_results(a_ ) local_variables[0].assign(a_ ) local_variables[1].assign(a_ ) strategy.run(a_, args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(a_ ) def _check_local_values(a_: str, a_: Optional[int] ): _snake_case : str = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value(), a_, tol=1E-2 ) self.assertListAlmostEqual(values[1].value(), a_, tol=1E-2 ) accumulate([1.0, 2.0], [-1.0, 1.0] ) accumulate([3.0, -1.0], [-1.0, -1.0] ) accumulate([-2.0, 2.0], [3.0, -2.0] ) self.assertEqual(accumulator.step, 3 ) _check_local_values([2.0, 3.0], [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) _check_local_values([0.0, 0.0], [0.0, 0.0] )
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
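# A minimal, self-contained sketch (illustration only; shapes chosen for brevity, torch
# is already imported at the top of this script) of the fused-qkv split performed inside
# assign_to_checkpoint above: the checkpoint stores query, key and value stacked along
# dim 0, and they are recovered per attention head.
num_head_channels = 8                                      # stand-in for config["num_head_channels"]
qkv_bias = torch.randn(48)                                 # fused qkv bias: 3 * channels entries
channels = qkv_bias.shape[0] // 3                          # 16
num_heads = qkv_bias.shape[0] // num_head_channels // 3    # 2
target_shape = (-1, channels) if len(qkv_bias.shape) == 3 else (-1,)
reshaped = qkv_bias.reshape((num_heads, 3 * channels // num_heads) + qkv_bias.shape[1:])
query, key, value = reshaped.split(channels // num_heads, dim=1)
assert query.reshape(target_shape).shape == (channels,)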
28
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' def __init__( self: List[str], *a_: str, **a_: Tuple ): '''simple docstring''' warnings.warn( """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use VideoMAEImageProcessor instead.""", a_, ) super().__init__(*a_, **a_ )
28
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
1
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[int | str] ): """simple docstring""" create_state_space_tree(snake_case__ , [] , 0 , [0 for i in range(len(snake_case__ ) )] ) def UpperCAmelCase__ (snake_case__ : list[int | str] , snake_case__ : list[int | str] , snake_case__ : int , snake_case__ : list[int] , ): """simple docstring""" if index == len(snake_case__ ): print(snake_case__ ) return for i in range(len(snake_case__ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) _snake_case : Optional[Any] = True create_state_space_tree(snake_case__ , snake_case__ , index + 1 , snake_case__ ) current_sequence.pop() _snake_case : Optional[int] = False A_ = [3, 1, 2, 4] generate_all_permutations(sequence) A_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
28
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
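# Minimal usage sketch (an illustration, assuming the unobfuscated upstream classes are
# importable from `transformers`): the composite config accepts the two sub-configs as
# plain dicts, exactly as the __init__ above does.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig(vocab_size=50_265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, patch_size=16)
config = BridgeTowerConfig(
    text_config=text_config.to_dict(), vision_config=vision_config.to_dict()
)
print(config.text_config.vocab_size)  # 50265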
28
1
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' lowercase__ = ["pixel_values"] def __init__( self: str, a_: bool = True, a_: Dict[str, int] = None, a_: PILImageResampling = PILImageResampling.BICUBIC, a_: bool = True, a_: Dict[str, int] = None, a_: bool = True, a_: Union[int, float] = 1 / 255, a_: bool = True, a_: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, a_: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **a_: str, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} _snake_case : str = get_size_dict(a_, default_to_square=a_ ) _snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _snake_case : List[str] = get_size_dict(a_, param_name="""crop_size""" ) _snake_case : int = do_resize _snake_case : str = size _snake_case : List[Any] = resample _snake_case : List[Any] = do_center_crop _snake_case : Any = crop_size _snake_case : Union[str, Any] = do_rescale _snake_case : str = rescale_factor _snake_case : Union[str, Any] = do_normalize _snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _snake_case : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self: Tuple, a_: np.ndarray, a_: Dict[str, int], a_: PILImageResampling = PILImageResampling.BICUBIC, a_: Optional[Union[str, ChannelDimension]] = None, **a_: List[Any], ): '''simple docstring''' _snake_case : List[Any] = get_size_dict(a_, default_to_square=a_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _snake_case : List[str] = int((256 / 224) * size["""shortest_edge"""] ) _snake_case : Tuple = get_resize_output_image_size(a_, size=a_, default_to_square=a_ ) _snake_case : Dict = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" ) return resize( a_, size=(size_dict["""height"""], size_dict["""width"""]), resample=a_, data_format=a_, **a_ ) def UpperCamelCase_ ( self: List[Any], a_: np.ndarray, a_: Dict[str, int], a_: Optional[Union[str, ChannelDimension]] = None, **a_: Any, ): '''simple docstring''' _snake_case : Dict = get_size_dict(a_ ) if "height" not in size or "width" not in size: raise ValueError(f"Size dict must have keys 'height' and 'width'. 
Got {size.keys()}" ) return center_crop(a_, size=(size["""height"""], size["""width"""]), data_format=a_, **a_ ) def UpperCamelCase_ ( self: Dict, a_: np.ndarray, a_: Union[int, float], a_: Optional[Union[str, ChannelDimension]] = None, **a_: str, ): '''simple docstring''' return rescale(a_, scale=a_, data_format=a_, **a_ ) def UpperCamelCase_ ( self: Optional[int], a_: np.ndarray, a_: Union[float, List[float]], a_: Union[float, List[float]], a_: Optional[Union[str, ChannelDimension]] = None, **a_: Tuple, ): '''simple docstring''' return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ ) def UpperCamelCase_ ( self: Dict, a_: ImageInput, a_: Optional[bool] = None, a_: Optional[Dict[str, int]] = None, a_: PILImageResampling = None, a_: Optional[bool] = None, a_: Optional[Dict[str, int]] = None, a_: Optional[bool] = None, a_: Optional[float] = None, a_: Optional[bool] = None, a_: Optional[Union[float, Iterable[float]]] = None, a_: Optional[Union[float, Iterable[float]]] = None, a_: Optional[TensorType] = None, a_: ChannelDimension = ChannelDimension.FIRST, **a_: List[str], ): '''simple docstring''' _snake_case : Any = do_resize if do_resize is not None else self.do_resize _snake_case : Union[str, Any] = resample if resample is not None else self.resample _snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop _snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale _snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _snake_case : Tuple = image_mean if image_mean is not None else self.image_mean _snake_case : int = image_std if image_std is not None else self.image_std _snake_case : Union[str, Any] = size if size is not None else self.size _snake_case : Tuple = get_size_dict(a_, default_to_square=a_ ) _snake_case : str = crop_size if crop_size is not None else self.crop_size _snake_case : int = get_size_dict(a_, param_name="""crop_size""" ) _snake_case : Optional[Any] = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. _snake_case : Any = [to_numpy_array(a_ ) for image in images] if do_resize: _snake_case : List[Any] = [self.resize(a_, a_, a_ ) for image in images] if do_center_crop: _snake_case : str = [self.center_crop(a_, a_ ) for image in images] if do_rescale: _snake_case : Optional[Any] = [self.rescale(a_, a_ ) for image in images] if do_normalize: _snake_case : Optional[int] = [self.normalize(a_, a_, a_ ) for image in images] _snake_case : Tuple = [to_channel_dimension_format(a_, a_ ) for image in images] _snake_case : Tuple = {"""pixel_values""": images} return BatchFeature(data=a_, tensor_type=a_ )
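# Pipeline summary (a sketch of what `preprocess` above implements, in order; the
# obfuscated method names in this dump shadow one another, so treat this as a guide to
# the intended flow rather than as runnable here):
#   1. resize: shortest edge scaled to int(256 / 224 * size["shortest_edge"]), bicubic
#   2. center_crop: to (crop_size["height"], crop_size["width"]), 224 x 224 by default
#   3. rescale: multiply pixel values by 1 / 255
#   4. normalize: with IMAGENET_DEFAULT_MEAN and IMAGENET_DEFAULT_STD
#   5. to_channel_dimension_format: channels-first, returned as BatchFeature["pixel_values"]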
28
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
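# Example invocation (a sketch; the script filename is an assumption, the flags are the
# ones defined by the argparse block above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./converted/blip2-opt-2.7b \
#       --push_to_hub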
28
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : List[Any] = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _snake_case : Dict = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) _snake_case : List[str] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) _snake_case : List[str] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) _snake_case : Union[str, Any] = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) _snake_case : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) _snake_case : List[str] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) _snake_case : Optional[int] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) _snake_case : str = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) _snake_case : str = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) _snake_case : str = key.replace("""image_encoder.module""" , """flava.image_model""" ) _snake_case : Dict = key.replace("""text_encoder.module""" , """flava.text_model""" ) _snake_case : List[str] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) _snake_case : Any = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) _snake_case : List[Any] = key.replace("""text_projection""" , """flava.text_projection""" ) _snake_case : Dict = key.replace("""image_projection""" , """flava.image_projection""" ) _snake_case : Tuple = value.float() for key, value in codebook_state_dict.items(): _snake_case : Optional[Any] = value return upgrade @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any]=None ): """simple docstring""" if config_path is not None: _snake_case : str = FlavaConfig.from_pretrained(snake_case__ ) else: _snake_case : int = FlavaConfig() _snake_case : int = FlavaForPreTraining(snake_case__ ).eval() _snake_case : Tuple = convert_dalle_checkpoint(snake_case__ , snake_case__ , save_checkpoint=snake_case__ ) if os.path.exists(snake_case__ ): _snake_case : int = torch.load(snake_case__ , map_location="""cpu""" ) else: _snake_case : List[Any] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" ) _snake_case : Any = upgrade_state_dict(snake_case__ , snake_case__ ) hf_model.load_state_dict(snake_case__ ) _snake_case : List[Any] = hf_model.state_dict() _snake_case : Union[str, Any] = count_parameters(snake_case__ ) _snake_case : Optional[Any] = count_parameters(snake_case__ ) + count_parameters(snake_case__ ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) hf_model.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch 
model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') A_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
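# Example invocation (a sketch; the script filename is an assumption, the flags are the
# ones defined by the argparse block above):
#
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./converted/flava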
28
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
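# Example invocation (a sketch; the script filename is an assumption, the flags are the
# ones defined by the argparse block above):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./converted/mobilevitv2-1.0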
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int = 1_00_00_00 ): """simple docstring""" _snake_case : Tuple = set(range(3 , snake_case__ , 2 ) ) primes.add(2 ) for p in range(3 , snake_case__ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , snake_case__ , snake_case__ ) ) ) _snake_case : Union[str, Any] = [float(snake_case__ ) for n in range(limit + 1 )] for p in primes: for n in range(snake_case__ , limit + 1 , snake_case__ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
28
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
28
1
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[str] = 0 if start < end: _snake_case : Tuple = randint(snake_case__ , snake_case__ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Optional[Any] = temp _snake_case , _snake_case : Dict = _in_place_partition(snake_case__ , snake_case__ , snake_case__ ) count += _in_place_quick_sort(snake_case__ , snake_case__ , p - 1 ) count += _in_place_quick_sort(snake_case__ , p + 1 , snake_case__ ) return count def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : int ): """simple docstring""" _snake_case : Any = 0 _snake_case : List[Any] = randint(snake_case__ , snake_case__ ) _snake_case : Union[str, Any] = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Tuple = temp _snake_case : List[Any] = start - 1 for index in range(snake_case__ , snake_case__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Tuple = new_pivot_index + 1 _snake_case : List[str] = a[new_pivot_index] _snake_case : List[Any] = a[index] _snake_case : List[Any] = temp _snake_case : str = a[new_pivot_index + 1] _snake_case : Dict = a[end] _snake_case : Dict = temp return new_pivot_index + 1, count A_ = TemporaryFile() A_ = 1_00 # 1000 elements are to be sorted A_ , A_ = 0, 1 # mean and standard deviation A_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array A_ = np.load(outfile) A_ = len(M) - 1 A_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
28
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
1
"""simple docstring""" import numpy as np def UpperCAmelCase__ (snake_case__ : np.array ): """simple docstring""" return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
28
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
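For orientation, here is a minimal sketch of how this benchmark class is normally driven through the public transformers API (the model name and sizes below are illustrative, not part of the original file):

# Usage sketch (illustrative): run speed/memory benchmarks for one model.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],  # any hub checkpoint with a TF model class
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    eager_mode=False,  # graph mode; set use_xla=True to also compile with XLA
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # prints result tables and returns the collected measurements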
"""simple docstring""" from timeit import timeit def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if number < 0: raise ValueError("""the value of input must not be negative""" ) _snake_case : int = 0 while number: number &= number - 1 result += 1 return result def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if number < 0: raise ValueError("""the value of input must not be negative""" ) _snake_case : Tuple = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def UpperCAmelCase__ (): """simple docstring""" def do_benchmark(snake_case__ : int ) -> None: _snake_case : int = """import __main__ as z""" print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(snake_case__ ) = }" ) _snake_case : Any = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=snake_case__ ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(snake_case__ ) = }" ) _snake_case : Optional[int] = timeit( """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=snake_case__ , ) print(F"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(snake_case__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
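Outside the test harness, the checkpoint pinned above can be exercised in a few lines (a minimal sketch; the image path is a placeholder):

# Minimal BEiT classification sketch.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("cats.png")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])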
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase__ (snake_case__ : Dict ): """simple docstring""" _snake_case , _snake_case : Any = image.size _snake_case , _snake_case : Tuple = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _snake_case : int = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) _snake_case : Tuple = np.array(snake_case__ ).astype(np.floataa ) / 2_55.0 _snake_case : str = image[None].transpose(0 , 3 , 1 , 2 ) _snake_case : str = torch.from_numpy(snake_case__ ) return 2.0 * image - 1.0 class lowercase( __a ): '''simple docstring''' def __init__( self: Any, a_: VQModel, a_: UNetaDModel, a_: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): '''simple docstring''' super().__init__() self.register_modules(vqvae=a_, unet=a_, scheduler=a_ ) @torch.no_grad() def __call__( self: Dict, a_: Union[torch.Tensor, PIL.Image.Image] = None, a_: Optional[int] = 1, a_: Optional[int] = 100, a_: Optional[float] = 0.0, a_: Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_: Optional[str] = "pil", a_: bool = True, ): '''simple docstring''' if isinstance(a_, PIL.Image.Image ): _snake_case : List[Any] = 1 elif isinstance(a_, torch.Tensor ): _snake_case : int = image.shape[0] else: raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a_ )}" ) if isinstance(a_, PIL.Image.Image ): _snake_case : Optional[int] = preprocess(a_ ) _snake_case , _snake_case : Any = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _snake_case : str = (batch_size, self.unet.config.in_channels // 2, height, width) _snake_case : Any = next(self.unet.parameters() ).dtype _snake_case : int = randn_tensor(a_, generator=a_, device=self.device, dtype=a_ ) _snake_case : List[str] = image.to(device=self.device, dtype=a_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(a_, device=self.device ) _snake_case : Optional[int] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _snake_case : str = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _snake_case : Optional[int] = {} if accepts_eta: _snake_case : Optional[Any] = eta for t in self.progress_bar(a_ ): # concat latents and low resolution image in the channel dimension. 
_snake_case : List[str] = torch.cat([latents, image], dim=1 ) _snake_case : int = self.scheduler.scale_model_input(a_, a_ ) # predict the noise residual _snake_case : Any = self.unet(a_, a_ ).sample # compute the previous noisy sample x_t -> x_t-1 _snake_case : Dict = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample # decode the image latents with the VQVAE _snake_case : Tuple = self.vqvae.decode(a_ ).sample _snake_case : Dict = torch.clamp(a_, -1.0, 1.0 ) _snake_case : Optional[Any] = image / 2 + 0.5 _snake_case : List[Any] = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": _snake_case : List[str] = self.numpy_to_pil(a_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=a_ )
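This pipeline is published in diffusers as `LDMSuperResolutionPipeline`; a usage sketch with the released 4x checkpoint (the input path is a placeholder):

# Upscale a low-resolution image 4x with the LDM super-resolution pipeline.
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained(
    "CompVis/ldm-super-resolution-4x-openimages"
)  # pipe.to("cuda") for speed
low_res = Image.open("low_res.png").convert("RGB")  # placeholder path
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")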
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
"""simple docstring""" import warnings from functools import wraps from typing import Callable def UpperCAmelCase__ (snake_case__ : Callable ): """simple docstring""" @wraps(snake_case__ ) def _inner_fn(*snake_case__ : Any , **snake_case__ : Tuple ): warnings.warn( (F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , snake_case__ , ) return fn(*snake_case__ , **snake_case__ ) return _inner_fn
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : Dict = VideoMAEConfig() set_architecture_configs(snake_case__ , snake_case__ ) if "finetuned" not in model_name: _snake_case : List[str] = False if "finetuned" in model_name: _snake_case : str = """huggingface/label-files""" if "kinetics" in model_name: _snake_case : List[str] = 4_00 _snake_case : Any = """kinetics400-id2label.json""" elif "ssv2" in model_name: _snake_case : int = 1_74 _snake_case : Tuple = """something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) _snake_case : List[Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : int = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Any = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : List[Any] ): """simple docstring""" if "small" in model_name: _snake_case : List[str] = 3_84 _snake_case : Optional[Any] = 15_36 _snake_case : Dict = 12 _snake_case : Optional[int] = 16 _snake_case : Dict = 12 _snake_case : int = 3 _snake_case : Union[str, Any] = 1_92 _snake_case : Optional[int] = 7_68 elif "large" in model_name: _snake_case : Dict = 10_24 _snake_case : int = 40_96 _snake_case : int = 24 _snake_case : int = 16 _snake_case : Optional[int] = 12 _snake_case : Optional[int] = 8 _snake_case : Union[str, Any] = 5_12 _snake_case : Optional[Any] = 20_48 elif "huge" in model_name: _snake_case : Dict = 12_80 _snake_case : Any = 51_20 _snake_case : int = 32 _snake_case : Union[str, Any] = 16 _snake_case : Union[str, Any] = 12 _snake_case : int = 8 _snake_case : List[str] = 6_40 _snake_case : List[str] = 25_60 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" if "encoder." 
in name: _snake_case : int = name.replace("""encoder.""" , """""" ) if "cls_token" in name: _snake_case : Dict = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: _snake_case : Dict = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: _snake_case : Tuple = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _snake_case : Any = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _snake_case : Tuple = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: _snake_case : str = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: _snake_case : List[Any] = name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: _snake_case : str = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: _snake_case : Dict = name.replace("""attn""" , """attention.self""" ) if "attn" in name: _snake_case : int = name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: _snake_case : str = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _snake_case : List[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _snake_case : str = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _snake_case : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: _snake_case : Union[str, Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: _snake_case : str = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: _snake_case : List[Any] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _snake_case : Tuple = name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _snake_case : Any = name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: _snake_case : Tuple = name.replace("""head""" , """classifier""" ) return name def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[str] ): """simple docstring""" for key in orig_state_dict.copy().keys(): _snake_case : int = orig_state_dict.pop(snake_case__ ) if key.startswith("""encoder.""" ): _snake_case : Tuple = key.replace("""encoder.""" , """""" ) if "qkv" in key: _snake_case : str = key.split(""".""" ) if key.startswith("""decoder.blocks""" ): _snake_case : int = config.decoder_hidden_size _snake_case : Tuple = int(key_split[2] ) _snake_case : str = """decoder.decoder_layers.""" if "weight" in key: _snake_case : Optional[int] = val[:dim, :] _snake_case : Optional[int] = val[dim : dim * 2, :] _snake_case : Optional[Any] = val[-dim:, :] else: _snake_case : Union[str, Any] = config.hidden_size _snake_case : Union[str, Any] = int(key_split[1] ) _snake_case : str = """videomae.encoder.layer.""" if "weight" in key: _snake_case : List[Any] = val[:dim, :] _snake_case : Tuple = val[dim : dim * 2, :] _snake_case : Dict = val[-dim:, :] else: _snake_case : List[Any] = val return orig_state_dict def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = 
hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) _snake_case : Optional[int] = np.load(snake_case__ ) return list(snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Union[str, Any] = get_videomae_config(snake_case__ ) if "finetuned" in model_name: _snake_case : Tuple = VideoMAEForVideoClassification(snake_case__ ) else: _snake_case : int = VideoMAEForPreTraining(snake_case__ ) # download original checkpoint, hosted on Google Drive _snake_case : int = """pytorch_model.bin""" gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) _snake_case : List[Any] = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in files: _snake_case : int = files["""model"""] else: _snake_case : Tuple = files["""module"""] _snake_case : List[str] = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # verify model on basic input _snake_case : List[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _snake_case : int = prepare_video() _snake_case : str = image_processor(snake_case__ , return_tensors="""pt""" ) if "finetuned" not in model_name: _snake_case : int = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) _snake_case : Tuple = torch.load(snake_case__ ) _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits _snake_case : Optional[Any] = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _snake_case : Tuple = torch.Size([1, 4_00] ) _snake_case : int = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": _snake_case : Dict = torch.Size([1, 1_74] ) _snake_case : Any = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": _snake_case : str = torch.Size([1, 14_08, 15_36] ) _snake_case : List[Any] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": _snake_case : int = torch.Size([1, 14_08, 15_36] ) _snake_case : Optional[int] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one _snake_case : Any = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": _snake_case : Optional[int] = torch.Size([1, 14_08, 15_36] ) _snake_case : int = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] 
) elif model_name == "videomae-large-finetuned-kinetics": _snake_case : Tuple = torch.Size([1, 4_00] ) _snake_case : Optional[Any] = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": _snake_case : Tuple = torch.Size([1, 4_00] ) _snake_case : List[str] = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": _snake_case : int = torch.Size([1, 4_00] ) _snake_case : Dict = torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": _snake_case : Dict = torch.Size([1, 4_00] ) _snake_case : Tuple = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": _snake_case : List[str] = torch.Size([1, 14_08, 15_36] ) _snake_case : int = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _snake_case : Optional[int] = torch.Size([1, 1_74] ) _snake_case : Dict = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": _snake_case : Tuple = torch.Size([1, 14_08, 15_36] ) _snake_case : Dict = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": _snake_case : str = torch.Size([1, 1_74] ) _snake_case : List[Any] = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(F"Model name not supported. Should be one of {model_names}" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": _snake_case : Union[str, Any] = outputs.loss assert torch.allclose(snake_case__ , snake_case__ , atol=1e-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model and image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) model.save_pretrained(snake_case__ ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(snake_case__ , organization="""nielsr""" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''', type=str, help=( '''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct''' ''' download link.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''/Users/nielsrogge/Documents/VideoMAE/Test''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
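Once converted and pushed, a checkpoint produced by this script is consumed like any other hub model; a hedged sketch (the repo id shown is the commonly published one and may differ from your own export):

# Classify a video clip with a converted VideoMAE checkpoint.
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

repo = "MCG-NJU/videomae-base-finetuned-kinetics"  # assumed published repo id
processor = VideoMAEImageProcessor.from_pretrained(repo)
model = VideoMAEForVideoClassification.from_pretrained(repo)
# 16 random uint8 frames stand in for a real clip.
video = list(np.random.randint(0, 256, (16, 224, 224, 3), dtype=np.uint8))
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])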
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
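# Hedged, standalone re-implementation of the prompt-parsing convention that
# process_prompts above encodes: prompts are separated by "|" and may carry an
# optional ":weight" suffix. Function and variable names here are illustrative.
def parse_prompts(prompts: str) -> dict:
    processed, weights = [], []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:
            text, weight = prompt.split(":")
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(prompt)
            weights.append(1.0)
    return {"prompts": processed, "weights": weights}


assert parse_prompts("a cat:2 | a dog") == {"prompts": ["a cat", "a dog"], "weights": [2.0, 1.0]}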
28
1
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = CpmAntTokenizer lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' super().setUp() _snake_case : List[Any] = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] _snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) _snake_case : Optional[Any] = """今天天气真好!""" _snake_case : List[Any] = ["""今天""", """天气""", """真""", """好""", """!"""] _snake_case : Tuple = tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) _snake_case : List[Any] = """今天天气真好!""" _snake_case : Union[str, Any] = [tokenizer.bos_token] + tokens _snake_case : Dict = [6, 9_802, 14_962, 2_082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ ) _snake_case : Dict = tokenizer.decode(a_ ) self.assertEqual(a_, a_ )
28
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A_ = '''▁''' A_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = BigBirdTokenizer lowercase__ = BigBirdTokenizerFast lowercase__ = True lowercase__ = True def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' super().setUp() _snake_case : Union[str, Any] = self.tokenizer_class(a_, keep_accents=a_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Any = """<s>""" _snake_case : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], """<unk>""" ) self.assertEqual(vocab_keys[1], """<s>""" ) self.assertEqual(vocab_keys[-1], """[MASK]""" ) self.assertEqual(len(a_ ), 1_004 ) def UpperCamelCase_ ( self: int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_000 ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return _snake_case : Optional[int] = self.get_tokenizer() _snake_case : Any = self.get_rust_tokenizer() _snake_case : Optional[Any] = """I was born in 92000, and this is falsé.""" _snake_case : Tuple = tokenizer.tokenize(a_ ) _snake_case : Tuple = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) _snake_case : List[str] = tokenizer.encode(a_, add_special_tokens=a_ ) _snake_case : Optional[Any] = rust_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) _snake_case : Tuple = self.get_rust_tokenizer() _snake_case : Union[str, Any] = tokenizer.encode(a_ ) _snake_case : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : int = BigBirdTokenizer(a_, keep_accents=a_ ) _snake_case : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382], ) _snake_case : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ], ) _snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) _snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + 
"""""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ], ) @cached_property def UpperCamelCase_ ( self: str ): '''simple docstring''' return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """Hello World!""" _snake_case : List[str] = [65, 18_536, 2_260, 101, 66] self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Any = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off _snake_case : Union[str, Any] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) ) @require_torch @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence _snake_case : int = list(self.big_tokenizer.get_vocab().keys() )[:10] _snake_case : List[Any] = """ """.join(a_ ) _snake_case : Dict = self.big_tokenizer.encode_plus(a_, return_tensors="""pt""", return_token_type_ids=a_ ) _snake_case : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence], return_tensors="""pt""", return_token_type_ids=a_ ) _snake_case : Any = BigBirdConfig(attention_type="""original_full""" ) _snake_case : str = BigBirdModel(a_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a_ ) model(**a_ ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) _snake_case : Optional[int] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids ) self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Any = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_, model_name="""google/bigbird-roberta-base""", revision="""215c99f1600e06f83acce68422f2035b2b5c3510""", )
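# The "▁" marker (SPIECE_UNDERLINE) asserted throughout the BigBird tests
# above is SentencePiece's word-boundary symbol; detokenizing is just a join
# plus replace. A self-contained illustration using tokens from the test:
SPIECE_UNDERLINE = "▁"
pieces = [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n"]
assert "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip() == "I was born"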
28
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
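# Sketch of the shape arithmetic behind the `image_size // 32` assertion in
# create_and_check_model above: ConvNeXt-style backbones downsample by 4 at
# the patchify stem and by 2 at each of the remaining stages.
image_size, num_stages = 32, 4
resolution = image_size // 4  # stem
for _ in range(num_stages - 1):
    resolution //= 2  # each later stage halves H and W
assert resolution == image_size // 32 == 1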
28
1
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowercase( __a ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(a_, """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(a_, """num_attention_heads""" ) ) class lowercase: '''simple docstring''' def __init__( self: int, a_: Optional[Any], a_: Union[str, Any]=13, a_: Dict=64, a_: Union[str, Any]=3, a_: Union[str, Any]=3, a_: Optional[int]=2, a_: List[str]=1, a_: Optional[Any]=16, a_: str=[128, 256, 384], a_: Optional[int]=[4, 6, 8], a_: Optional[Any]=[2, 3, 4], a_: Optional[Any]=[16, 16, 16], a_: Tuple=0, a_: Dict=[2, 2, 2], a_: Dict=[2, 2, 2], a_: Optional[int]=0.02, a_: Optional[int]=True, a_: List[Any]=True, a_: Optional[int]=2, ): '''simple docstring''' _snake_case : int = parent _snake_case : Tuple = batch_size _snake_case : int = image_size _snake_case : Dict = num_channels _snake_case : Union[str, Any] = kernel_size _snake_case : Optional[Any] = stride _snake_case : int = padding _snake_case : Optional[int] = hidden_sizes _snake_case : Dict = num_attention_heads _snake_case : List[Any] = depths _snake_case : List[str] = key_dim _snake_case : Dict = drop_path_rate _snake_case : Tuple = patch_size _snake_case : Optional[int] = attention_ratio _snake_case : List[Any] = mlp_ratio _snake_case : Dict = initializer_range _snake_case : List[Any] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] _snake_case : List[Any] = is_training _snake_case : str = use_labels _snake_case : List[str] = num_labels _snake_case : List[str] = initializer_range def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Union[str, Any] = None if self.use_labels: _snake_case : Tuple = ids_tensor([self.batch_size], self.num_labels ) _snake_case : str = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) 
def UpperCamelCase_ ( self: str, a_: str, a_: Union[str, Any], a_: Tuple ): '''simple docstring''' _snake_case : Any = LevitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Optional[Any] = model(a_ ) _snake_case : Optional[int] = (self.image_size, self.image_size) _snake_case , _snake_case : Optional[int] = image_size[0], image_size[1] for _ in range(4 ): _snake_case : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) _snake_case : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), ) def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: Optional[Any], a_: str ): '''simple docstring''' _snake_case : Dict = self.num_labels _snake_case : Any = LevitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Tuple = config_and_inputs _snake_case : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[str] = LevitModelTester(self ) _snake_case : List[str] = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return @unittest.skip(reason="""Levit does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' pass @unittest.skip(reason="""Levit does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""Levit does not output attentions""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: 
str ): '''simple docstring''' def check_hidden_states_output(a_: Optional[int], a_: Tuple, a_: List[Any] ): _snake_case : str = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Optional[Any] = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[Any] = outputs.hidden_states _snake_case : Dict = len(self.model_tester.depths ) + 1 self.assertEqual(len(a_ ), a_ ) _snake_case : Union[str, Any] = (self.model_tester.image_size, self.model_tester.image_size) _snake_case , _snake_case : List[str] = image_size[0], image_size[1] for _ in range(4 ): _snake_case : Tuple = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) _snake_case : List[str] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [ height * width, self.model_tester.hidden_sizes[0], ], ) _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' pass def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: List[Any], a_: Optional[int]=False ): '''simple docstring''' _snake_case : List[str] = super()._prepare_for_class(a_, a_, return_labels=a_ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue _snake_case : int = model_class(a_ ) model.to(a_ ) model.train() _snake_case : int = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Tuple = False _snake_case : int = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue _snake_case : Optional[Any] = model_class(a_ ) model.gradient_checkpointing_enable() 
model.to(a_ ) model.train() _snake_case : int = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[str] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Optional[Any] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): _snake_case : str = problem_type["""title"""] _snake_case : Tuple = problem_type["""num_labels"""] _snake_case : Optional[int] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) if problem_type["num_labels"] > 1: _snake_case : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1, problem_type["""num_labels"""] ) _snake_case : Optional[int] = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: _snake_case : Any = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = LevitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: int ): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( a_ ) _snake_case : Any = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : List[Any] = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
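# Standalone version of the convolution output-size formula the Levit tests
# above apply four times (defaults from the model tester: image size 64,
# kernel 3, stride 2, padding 1).
from math import floor


def conv_out(size: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1


size = 64
for _ in range(4):
    size = conv_out(size)
assert size == 4  # 64 -> 32 -> 16 -> 8 -> 4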
28
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
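# Hedged, minimal round trip mirroring the ParquetDatasetWriter/Reader tests
# above; the temporary path is illustrative and `datasets` must be installed.
import os
import tempfile

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "foo.parquet")
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(ds, path).write() > 0
    reloaded = ParquetDatasetReader(path).read()
    assert reloaded.column_names == ["col_1", "col_2"]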
28
1
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration A_ = pytest.mark.integration A_ = {'''comet'''} A_ = importlib.util.find_spec('''fairseq''') is not None A_ = {'''code_eval'''} A_ = os.name == '''nt''' A_ = {'''bertscore''', '''frugalscore''', '''perplexity'''} A_ = importlib.util.find_spec('''transformers''') is not None def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self : Dict , snake_case__ : Optional[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , snake_case__ ) return wrapper def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self : Optional[Any] , snake_case__ : str ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , snake_case__ ) return wrapper def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self : Dict , snake_case__ : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , snake_case__ ) return wrapper def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __a , __a , __a ) @local class lowercase( parameterized.TestCase ): '''simple docstring''' lowercase__ = {} lowercase__ = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def UpperCamelCase_ ( self: List[str], a_: List[str] ): '''simple docstring''' _snake_case : Any = """[...]""" _snake_case : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""", a_ ) ).module_path ) _snake_case : List[str] = datasets.load.import_main_class(metric_module.__name__, dataset=a_ ) # check parameters _snake_case : str = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(a_, metric_module.__name__ ): with self.use_local_metrics(): try: _snake_case : int = doctest.testmod(a_, verbose=a_, raise_on_error=a_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0 ) self.assertGreater(results.attempted, 1 ) @slow def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : Tuple = """[...]""" _snake_case : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""", a_ ) ).module_path ) # run doctest with self.use_local_metrics(): _snake_case : int = doctest.testmod(a_, verbose=a_, raise_on_error=a_ ) self.assertEqual(results.failed, 0 ) 
self.assertGreater(results.attempted, 1 ) @contextmanager def UpperCamelCase_ ( self: List[Any], a_: Tuple, a_: Dict ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](a_ ): yield else: yield @contextmanager def UpperCamelCase_ ( self: str ): '''simple docstring''' def load_local_metric(a_: Any, *a_: List[Any], **a_: Optional[Any] ): return load_metric(os.path.join("""metrics""", a_ ), *a_, **a_ ) with patch("""datasets.load_metric""" ) as mock_load_metric: _snake_case : str = load_local_metric yield @classmethod def UpperCamelCase_ ( cls: List[str], a_: Union[str, Any] ): '''simple docstring''' def wrapper(a_: int ): _snake_case : str = contextmanager(a_ ) _snake_case : Union[str, Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class lowercase( __a ): '''simple docstring''' def UpperCamelCase_ ( self: int, a_: Union[str, Any] ): '''simple docstring''' assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: _snake_case : Union[str, Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" import torch def bert_cos_score_idf(snake_case__ : Optional[Any] , snake_case__ : List[Any] , *snake_case__ : List[str] , **snake_case__ : int ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: _snake_case : Union[str, Any] = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" def load_from_checkpoint(snake_case__ : Dict ): class lowercase: '''simple docstring''' def UpperCamelCase_ ( self: Optional[int], a_: Union[str, Any], *a_: Optional[Any], **a_: Dict ): '''simple docstring''' assert len(a_ ) == 2 _snake_case : Optional[int] = [0.19, 0.92] return scores, sum(a_ ) / len(a_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: _snake_case : Any = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: _snake_case : Union[str, Any] = load_from_checkpoint yield def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) _snake_case : List[str] = """ERROR""" _snake_case : int = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(snake_case__ , match=re.escape(snake_case__ ) ): metric.compute(predictions=[] , references=[] , scheme=snake_case__ )
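# The require_fairseq / require_transformers / Windows wrappers above all
# follow one pattern: skip the test when a precondition is not met, otherwise
# run it. A generic, runnable sketch of that pattern (names illustrative):
from functools import wraps


def skip_unless(available: bool, reason: str):
    def decorator(test_case):
        @wraps(test_case)
        def wrapper(self, *args, **kwargs):
            if not available:
                self.skipTest(reason)
            else:
                test_case(self, *args, **kwargs)
        return wrapper
    return decorator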
28
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
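# The slow tests above double as usage documentation. Below is a minimal inference
# sketch distilled from them: the checkpoint name and the (1, num_labels, 512, 512)
# logits shape come straight from the tests, while the local image path is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
model.eval()

image = Image.open("ADE_val_00000001.jpg").convert("RGB")  # placeholder path; any RGB image works
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels, 512, 512), as asserted in the tests
segmentation = logits.argmax(dim=1)  # per-pixel class ids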
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[Any] ): """simple docstring""" if height >= 1: move_tower(height - 1 , snake_case__ , snake_case__ , snake_case__ ) move_disk(snake_case__ , snake_case__ ) move_tower(height - 1 , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Union[str, Any] ): """simple docstring""" print("""moving disk from""" , snake_case__ , """to""" , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = int(input("""Height of hanoi: """ ).strip() ) move_tower(snake_case__ , """A""" , """B""" , """C""" ) if __name__ == "__main__": main()
28
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
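# The prompt syntax accepted by the prompt-processing method above is worth spelling
# out: "a|b" splits prompts, "text:weight" attaches a weight, and plain text defaults
# to weight 1.0. This is a standalone re-statement of that convention for illustration,
# not an import from the module above.
import torch


def parse_prompts(prompts):
    """Parse "a|b", [("text", w), ...] or ["text:w", "plain"] into prompts plus weights."""
    processed, weights = [], []
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        processed.append(text)
        weights.append(weight)
    return {"prompts": processed, "weights": torch.tensor(weights)}


assert parse_prompts("a smiling face:2|a photo")["weights"].tolist() == [2.0, 1.0]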
28
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowercase( __a ): '''simple docstring''' lowercase__ = 42 class lowercase( __a , __a ): '''simple docstring''' @register_to_config def __init__( self: Dict, a_: int = 3, a_: int = 3, a_: Tuple[str] = ("DownEncoderBlock2D",), a_: Tuple[str] = ("UpDecoderBlock2D",), a_: Tuple[int] = (64,), a_: int = 1, a_: str = "silu", a_: int = 3, a_: int = 32, a_: int = 256, a_: int = 32, a_: Optional[int] = None, a_: float = 0.18_215, a_: str = "group", ): '''simple docstring''' super().__init__() # pass init params to Encoder _snake_case : Dict = Encoder( in_channels=a_, out_channels=a_, down_block_types=a_, block_out_channels=a_, layers_per_block=a_, act_fn=a_, norm_num_groups=a_, double_z=a_, ) _snake_case : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels _snake_case : str = nn.Convad(a_, a_, 1 ) _snake_case : List[str] = VectorQuantizer(a_, a_, beta=0.25, remap=a_, sane_index_shape=a_ ) _snake_case : Dict = nn.Convad(a_, a_, 1 ) # pass init params to Decoder _snake_case : Any = Decoder( in_channels=a_, out_channels=a_, up_block_types=a_, block_out_channels=a_, layers_per_block=a_, act_fn=a_, norm_num_groups=a_, norm_type=a_, ) @apply_forward_hook def UpperCamelCase_ ( self: str, a_: torch.FloatTensor, a_: bool = True ): '''simple docstring''' _snake_case : Optional[int] = self.encoder(a_ ) _snake_case : int = self.quant_conv(a_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=a_ ) @apply_forward_hook def UpperCamelCase_ ( self: Tuple, a_: torch.FloatTensor, a_: bool = False, a_: bool = True ): '''simple docstring''' if not force_not_quantize: _snake_case , _snake_case , _snake_case : Optional[int] = self.quantize(a_ ) else: _snake_case : List[Any] = h _snake_case : Union[str, Any] = self.post_quant_conv(a_ ) _snake_case : Any = self.decoder(a_, quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: torch.FloatTensor, a_: bool = True ): '''simple docstring''' _snake_case : str = sample _snake_case : Union[str, Any] = self.encode(a_ ).latents _snake_case : str = self.decode(a_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=a_ )
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
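# The expected ids in these tests follow directly from ByT5's scheme: each UTF-8 byte
# maps to byte_value + 3 (ids 0-2 are reserved for pad/eos/unk), and an eos is appended
# on encode. A minimal sketch of that correspondence:
from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer()
ids = tokenizer("hi").input_ids
# each byte shifts by the 3 special tokens; </s> (id 1) is appended automatically
assert ids == [ord("h") + 3, ord("i") + 3, tokenizer.eos_token_id]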
28
1
"""simple docstring""" from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging A_ = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class lowercase( __a ): '''simple docstring''' def __init__( self: Dict, a_: int = 101 ): '''simple docstring''' _snake_case : Optional[Any] = length def __len__( self: Dict ): '''simple docstring''' return self.length def __getitem__( self: Optional[Any], a_: Union[str, Any] ): '''simple docstring''' return i class lowercase: '''simple docstring''' def __call__( self: Tuple, a_: Optional[int] ): '''simple docstring''' return {"input_ids": torch.tensor(a_ ), "labels": torch.tensor(a_ )} class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Union[str, Any] ): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. _snake_case : Optional[int] = nn.Linear(120, 80 ) def UpperCamelCase_ ( self: Optional[int], a_: Dict, a_: Optional[int]=None ): '''simple docstring''' if labels is not None: return torch.tensor(0.0, device=input_ids.device ), input_ids else: return input_ids class lowercase( __a ): '''simple docstring''' @require_torch_neuroncore def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir() _snake_case : int = f"--output_dir {output_dir}".split() _snake_case : List[Any] = ["""torchrun"""] + distributed_args + args execute_subprocess_async(a_, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class lowercase( __a ): '''simple docstring''' @require_torch_multi_gpu def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Optional[int] = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _snake_case : Tuple = self.get_auto_remove_tmp_dir() _snake_case : Tuple = f"--output_dir {output_dir}".split() _snake_case : Optional[int] = ["""torchrun"""] + distributed_args + args execute_subprocess_async(a_, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py A_ = HfArgumentParser((TrainingArguments,)) A_ = parser.parse_args_into_dataclasses()[0] logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [1_01, 40, 7]: A_ = DummyDataset(dataset_length) def UpperCAmelCase__ (snake_case__ : EvalPrediction ): """simple docstring""" _snake_case : Tuple = list(range(len(snake_case__ ) ) ) _snake_case : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( """Predictions and/or labels do not match expected results:\n - predictions: """ F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} A_ = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) A_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) A_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) A_ = 2 A_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) A_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) A_ = None
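# The property under test is that predictions come back in dataset order even though
# each rank sees an interleaved shard. A process-free sketch of round-robin sharding
# and in-order reassembly (illustrative only, not the Trainer's actual gather code):
def shard(dataset, num_replicas, rank):
    """DistributedSampler-style round-robin split (without padding)."""
    return dataset[rank::num_replicas]


def gather_in_order(shards):
    """Interleave per-rank shards back into the original dataset order."""
    out = []
    for i in range(max(len(s) for s in shards)):
        for s in shards:
            if i < len(s):
                out.append(s[i])
    return out


data = list(range(101))
shards = [shard(data, 2, rank) for rank in range(2)]
assert gather_in_order(shards) == data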
28
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
28
1
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = MgpstrTokenizer lowercase__ = False lowercase__ = {} lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' super().setUp() # fmt: off _snake_case : int = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp: fp.write(json.dumps(a_ ) + """\n""" ) def UpperCamelCase_ ( self: Optional[Any], **a_: int ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: str, a_: Dict ): '''simple docstring''' _snake_case : Dict = """tester""" _snake_case : List[str] = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[Any] = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) _snake_case : str = tokenizer.encode([special_token], add_special_tokens=a_ ) self.assertEqual(len(a_ ), 1 ) _snake_case : Any = tokenizer.decode(a_, skip_special_tokens=a_ ) self.assertTrue(special_token not in decoded ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case , _snake_case : int = self.get_input_output_texts(a_ ) _snake_case : Any = tokenizer.tokenize(a_ ) _snake_case : Dict = tokenizer.convert_tokens_to_ids(a_ ) _snake_case : Tuple = tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) _snake_case : Any = tokenizer.convert_ids_to_tokens(a_ ) self.assertNotEqual(len(a_ ), 0 ) _snake_case : Optional[Any] = tokenizer.decode(a_ ) self.assertIsInstance(a_, a_ ) self.assertEqual(text_a.replace(""" """, """""" ), a_ ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass
28
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
28
1
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
28
1
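The conversion script above splits each fused qkv attention tensor into separate query/key/value tensors with a per-head reshape. A minimal sketch of just that step, assuming the fused first dimension stacks q, k, v and taking num_head_channels as a stand-in for the config value (not quoted verbatim from the script):

# Minimal sketch of the fused-qkv split performed inside assign_to_checkpoint above.
import torch

def split_qkv(old_tensor: torch.Tensor, num_head_channels: int):
    channels = old_tensor.shape[0] // 3  # fused dim stacks q, k, v
    target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1,)
    num_heads = old_tensor.shape[0] // num_head_channels // 3
    # Group the fused dimension by head so each head's q/k/v slices stay adjacent.
    grouped = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
    query, key, value = grouped.split(channels // num_heads, dim=1)
    return query.reshape(target_shape), key.reshape(target_shape), value.reshape(target_shape)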
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): A_ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right A_ = 12_80_22 A_ = 12_80_28 @require_sentencepiece class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = MaMaaaTokenizer lowercase__ = False lowercase__ = False lowercase__ = True def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' super().setUp() _snake_case : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] _snake_case : Dict = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : Tuple = Path(self.tmpdirname ) save_json(a_, save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(a_, save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) _snake_case : Optional[Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self: Optional[Any], **a_: Optional[int] ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' return ( "This is a test", "This is a test", ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = """</s>""" _snake_case : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.get_tokenizer() _snake_case : Dict = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0], """</s>""" ) self.assertEqual(vocab_keys[1], """<unk>""" ) self.assertEqual(vocab_keys[-1], """<s>""" ) self.assertEqual(len(a_ ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("""Skip this test while all models are still to be uploaded.""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Tuple = self.get_tokenizer() _snake_case : str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ), [2, 3, 4, 5, 6], ) _snake_case : List[str] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) _snake_case : Optional[int] = tokenizer.convert_tokens_to_string(a_ ) self.assertEqual(a_, """This is a test""" ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = {"""input_ids""": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 
17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_, model_name="""facebook/m2m100_418M""", revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""", ) @require_torch @require_sentencepiece @require_tokenizers class lowercase( unittest.TestCase ): '''simple docstring''' lowercase__ = "facebook/m2m100_418M" lowercase__ = [ "In my opinion, there are two levels of response from the French government.", "NSA Affair Emphasizes Complete Lack of Debate on Intelligence", ] lowercase__ = [ "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "L'affaire NSA souligne l'absence totale de débat sur le renseignement", ] # fmt: off lowercase__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def UpperCamelCase_ ( cls: List[Any] ): '''simple docstring''' _snake_case : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name, src_lang="""en""", tgt_lang="""fr""" ) _snake_case : Dict = 1 return cls def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("""ar""" 
), 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("""en""" ), 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("""ro""" ), 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("""mr""" ), 128_063 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer.get_vocab() self.assertEqual(len(a_ ), self.tokenizer.vocab_size ) self.assertEqual(vocab["""<unk>"""], 3 ) self.assertIn(self.tokenizer.get_lang_token("""en""" ), a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = """en""" _snake_case : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self.assertIn(a_, self.tokenizer.all_special_ids ) # fmt: off _snake_case : Union[str, Any] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on _snake_case : Any = self.tokenizer.decode(a_, skip_special_tokens=a_ ) _snake_case : Optional[Any] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=a_ ) self.assertEqual(a_, a_ ) self.assertNotIn(self.tokenizer.eos_token, a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : int = tempfile.mkdtemp() _snake_case : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = MaMaaaTokenizer.from_pretrained(a_ ) self.assertDictEqual(new_tok.lang_token_to_id, a_ ) @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = """en""" _snake_case : Union[str, Any] = """fr""" _snake_case : Optional[int] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=a_, return_tensors="""pt""" ) _snake_case : List[str] = shift_tokens_right( batch["""labels"""], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id ) for k in batch: _snake_case : Optional[int] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = """mr""" self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) _snake_case : str = """zh""" self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) @require_torch def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = """mr""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _snake_case : Dict = """zh""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""zh""" )] ) 
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.tokenizer._build_translation_inputs("""A test""", return_tensors="""pt""", src_lang="""en""", tgt_lang="""ar""" ) self.assertEqual( nested_simplify(a_ ), { # en_XX, A, test, EOS """input_ids""": [[128_022, 58, 4_183, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 128_006, }, )
28
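The translation batch test above builds decoder inputs by right-shifting the labels. A sketch of that shift, matching the three-argument call used in the test; the library implementation may differ in detail (e.g. around -100 masking), so treat this as an assumption:

# Right-shift labels into decoder input ids: prepend the decoder-start
# token, drop the last label, and replace masked (-100) positions with pad.
import torch

def shift_tokens_right(labels: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted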
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
1
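The mode helper above returns every value tied for the highest count, in sorted order. An equivalent sketch with collections.Counter, which avoids the quadratic repeated count() calls of the list comprehension:

# Equivalent multi-mode computation via Counter: one pass to count,
# then keep every value tied for the maximum count, sorted.
from collections import Counter

def modes(values: list) -> list:
    if not values:
        return []
    counts = Counter(values)
    highest = max(counts.values())
    return sorted(value for value, count in counts.items() if count == highest)

print(modes([2, 3, 4, 5, 3, 4]))  # -> [3, 4]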
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ): """simple docstring""" _snake_case : Tuple = tau * frequency / samplerate _snake_case : Any = sin(snake_case__ ) _snake_case : Tuple = cos(snake_case__ ) _snake_case : Any = _sin / (2 * q_factor) _snake_case : Optional[Any] = (1 - _cos) / 2 _snake_case : int = 1 - _cos _snake_case : Optional[Any] = 1 + alpha _snake_case : List[str] = -2 * _cos _snake_case : Optional[Any] = 1 - alpha _snake_case : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ): """simple docstring""" _snake_case : List[Any] = tau * frequency / samplerate _snake_case : str = sin(snake_case__ ) _snake_case : Union[str, Any] = cos(snake_case__ ) _snake_case : Dict = _sin / (2 * q_factor) _snake_case : List[Any] = (1 + _cos) / 2 _snake_case : Optional[Any] = -1 - _cos _snake_case : Tuple = 1 + alpha _snake_case : int = -2 * _cos _snake_case : Optional[Any] = 1 - alpha _snake_case : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ): """simple docstring""" _snake_case : List[str] = tau * frequency / samplerate _snake_case : int = sin(snake_case__ ) _snake_case : List[Any] = cos(snake_case__ ) _snake_case : List[Any] = _sin / (2 * q_factor) _snake_case : List[Any] = _sin / 2 _snake_case : List[str] = 0 _snake_case : int = -ba _snake_case : Optional[int] = 1 + alpha _snake_case : Tuple = -2 * _cos _snake_case : Tuple = 1 - alpha _snake_case : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ): """simple docstring""" _snake_case : Dict = tau * frequency / samplerate _snake_case : List[str] = sin(snake_case__ ) _snake_case : Dict = cos(snake_case__ ) _snake_case : Union[str, Any] = _sin / (2 * q_factor) _snake_case : Any = 1 - alpha _snake_case : str = -2 * _cos _snake_case : List[str] = 1 + alpha _snake_case : Any = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ): """simple docstring""" _snake_case : List[Any] = tau * frequency / samplerate _snake_case : List[Any] = sin(snake_case__ ) _snake_case : str = cos(snake_case__ ) _snake_case : List[Any] = _sin / (2 * q_factor) _snake_case : Dict = 10 ** (gain_db / 40) _snake_case : int = 1 + alpha * big_a _snake_case : Dict = -2 * _cos _snake_case : List[str] = 1 - alpha * big_a _snake_case : Any = 1 + alpha / big_a _snake_case : int = -2 * _cos _snake_case : Tuple = 1 - alpha / big_a _snake_case : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ): """simple docstring""" _snake_case : List[Any] = tau * frequency / samplerate _snake_case : Union[str, Any] = sin(snake_case__ ) _snake_case : str = cos(snake_case__ ) _snake_case : Optional[int] = _sin / (2 * q_factor) _snake_case : int = 10 ** (gain_db / 40) _snake_case : Optional[int] = (big_a + 1) - (big_a - 1) * 
_cos _snake_case : List[str] = (big_a + 1) + (big_a - 1) * _cos _snake_case : List[Any] = (big_a - 1) - (big_a + 1) * _cos _snake_case : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos _snake_case : Optional[Any] = 2 * sqrt(snake_case__ ) * alpha _snake_case : Dict = big_a * (pmc + aaa) _snake_case : Optional[Any] = 2 * big_a * mpc _snake_case : Optional[Any] = big_a * (pmc - aaa) _snake_case : Union[str, Any] = ppmc + aaa _snake_case : Optional[int] = -2 * pmpc _snake_case : Tuple = ppmc - aaa _snake_case : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ): """simple docstring""" _snake_case : Union[str, Any] = tau * frequency / samplerate _snake_case : List[str] = sin(snake_case__ ) _snake_case : str = cos(snake_case__ ) _snake_case : List[str] = _sin / (2 * q_factor) _snake_case : Dict = 10 ** (gain_db / 40) _snake_case : Tuple = (big_a + 1) - (big_a - 1) * _cos _snake_case : Tuple = (big_a + 1) + (big_a - 1) * _cos _snake_case : Any = (big_a - 1) - (big_a + 1) * _cos _snake_case : List[str] = (big_a - 1) + (big_a + 1) * _cos _snake_case : Optional[int] = 2 * sqrt(snake_case__ ) * alpha _snake_case : Union[str, Any] = big_a * (ppmc + aaa) _snake_case : Dict = -2 * big_a * pmpc _snake_case : Optional[int] = big_a * (ppmc - aaa) _snake_case : int = pmc + aaa _snake_case : str = 2 * mpc _snake_case : Optional[Any] = pmc - aaa _snake_case : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
28
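The filter constructors above only derive biquad coefficients and hand them to IIRFilter, whose internals are not shown in this file. A direct-form-I sketch of what such a second-order section could look like; the set_coefficients([a0, a1, a2], [b0, b1, b2]) shape is inferred from the calls above, and the real audio_filters.iir_filter.IIRFilter may differ:

# Direct-form-I biquad sketch (assumed interface, normalized by a0):
#   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
class Biquad:
    def __init__(self) -> None:
        self.a = [1.0, 0.0, 0.0]  # feedback (denominator) coefficients
        self.b = [1.0, 0.0, 0.0]  # feedforward (numerator) coefficients
        self.x1 = self.x2 = 0.0   # previous two inputs
        self.y1 = self.y2 = 0.0   # previous two outputs

    def set_coefficients(self, a_coeffs: list, b_coeffs: list) -> None:
        a0 = a_coeffs[0]
        self.a = [c / a0 for c in a_coeffs]  # normalize so a0 == 1
        self.b = [c / a0 for c in b_coeffs]

    def process(self, sample: float) -> float:
        y = (self.b[0] * sample + self.b[1] * self.x1 + self.b[2] * self.x2
             - self.a[1] * self.y1 - self.a[2] * self.y2)
        self.x1, self.x2 = sample, self.x1
        self.y1, self.y2 = y, self.y1
        return y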
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
28
1
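The BridgeTower config above follows the composite-config pattern: a parent config owns text and vision sub-configs and flattens them in to_dict(). A stripped-down sketch of that serialization round trip, with illustrative class names rather than the transformers API:

# Composite-config serialization sketch: the parent deep-copies its
# attributes and replaces each sub-config object with a plain dict.
import copy

class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

cfg = CompositeConfig(text_config={"vocab_size": 50_265})
assert cfg.to_dict()["text_config"]["vocab_size"] == 50_265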
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = StableDiffusionDiffEditPipeline lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=a_, ) _snake_case : Optional[Any] = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=a_, set_alpha_to_one=a_, ) _snake_case : Dict = DDIMInverseScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=a_, set_alpha_to_zero=a_, ) torch.manual_seed(0 ) _snake_case : List[Any] = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=512, ) _snake_case : Any = CLIPTextModel(a_ ) _snake_case : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _snake_case : List[Any] = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: Union[str, Any]=0 ): '''simple docstring''' _snake_case : str = floats_tensor((1, 16, 16), rng=random.Random(a_ ) ).to(a_ ) _snake_case : Dict = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith("""mps""" ): _snake_case : Union[str, Any] = torch.manual_seed(a_ ) else: _snake_case : Tuple = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : List[str] = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self: Any, a_: Tuple, a_: str=0 ): '''simple docstring''' _snake_case : str = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) _snake_case : Union[str, Any] = image.cpu().permute(0, 2, 3, 1 )[0] _snake_case : int = Image.fromarray(np.uinta(a_ ) ).convert("""RGB""" ) if str(a_ ).startswith("""mps""" ): _snake_case : List[str] = torch.manual_seed(a_ ) else: _snake_case : List[str] = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : Union[str, Any] = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self: Optional[int], a_: Optional[Any], a_: int=0 ): '''simple docstring''' _snake_case : List[Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) _snake_case : Optional[int] = image.cpu().permute(0, 2, 3, 1 )[0] _snake_case : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert("""RGB""" ) if str(a_ ).startswith("""mps""" ): _snake_case : str = torch.manual_seed(a_ ) else: _snake_case : List[str] = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : Tuple = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self: Any ): '''simple docstring''' if not hasattr(self.pipeline_class, """_optional_components""" ): return _snake_case : str = self.get_dummy_components() _snake_case : List[str] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(a_, a_, a_ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) _snake_case : List[Any] = self.get_dummy_inputs(a_ ) _snake_case : Tuple = pipe(**a_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a_ ) _snake_case : Dict = self.pipeline_class.from_pretrained(a_ ) pipe_loaded.to(a_ ) pipe_loaded.set_progress_bar_config(disable=a_ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(a_, a_ ) is None, f"`{optional_component}` did not stay set to None after loading.", ) _snake_case : int = self.get_dummy_inputs(a_ ) _snake_case : Optional[int] = pipe_loaded(**a_ )[0] _snake_case : Any = np.abs(output - output_loaded ).max() self.assertLess(a_, 1E-4 ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = """cpu""" _snake_case : Dict = self.get_dummy_components() _snake_case : Any = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : Tuple = self.get_dummy_mask_inputs(a_ ) _snake_case : Union[str, Any] = pipe.generate_mask(**a_ ) _snake_case : str = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) _snake_case : Optional[Any] = np.array([0] * 9 ) _snake_case : Tuple = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self: 
List[Any] ): '''simple docstring''' _snake_case : Optional[Any] = """cpu""" _snake_case : Tuple = self.get_dummy_components() _snake_case : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : Union[str, Any] = self.get_dummy_inversion_inputs(a_ ) _snake_case : Optional[Any] = pipe.invert(**a_ ).images _snake_case : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) _snake_case : Dict = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], ) _snake_case : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_, 1E-3 ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = """cpu""" _snake_case : List[Any] = self.get_dummy_components() _snake_case : Optional[Any] = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""} _snake_case : List[str] = DPMSolverMultistepScheduler(**a_ ) _snake_case : List[str] = DPMSolverMultistepInverseScheduler(**a_ ) _snake_case : Optional[Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : int = self.get_dummy_inversion_inputs(a_ ) _snake_case : Union[str, Any] = pipe.invert(**a_ ).images _snake_case : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) _snake_case : Dict = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], ) _snake_case : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_, 1E-3 ) @require_torch_gpu @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) _snake_case : Optional[Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) _snake_case : List[Any] = raw_image def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : str = torch.manual_seed(0 ) _snake_case : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""", safety_checker=a_, torch_dtype=torch.floataa ) _snake_case : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) _snake_case : str = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) _snake_case : str = """a bowl of fruit""" _snake_case : str = """a bowl of pears""" _snake_case : str = pipe.generate_mask( image=self.raw_image, source_prompt=a_, target_prompt=a_, generator=a_, ) _snake_case : str = pipe.invert( prompt=a_, image=self.raw_image, inpaint_strength=0.7, generator=a_ ).latents _snake_case : Optional[Any] = pipe( prompt=a_, mask_image=a_, image_latents=a_, generator=a_, negative_prompt=a_, inpaint_strength=0.7, output_type="""numpy""", ).images[0] _snake_case : str = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() 
) < 5E-1 def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = torch.manual_seed(0 ) _snake_case : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""", safety_checker=a_, torch_dtype=torch.floataa ) _snake_case : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) _snake_case : Any = """a bowl of fruit""" _snake_case : Union[str, Any] = """a bowl of pears""" _snake_case : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=a_, target_prompt=a_, generator=a_, ) _snake_case : int = pipe.invert( prompt=a_, image=self.raw_image, inpaint_strength=0.7, generator=a_, num_inference_steps=25, ).latents _snake_case : Optional[Any] = pipe( prompt=a_, mask_image=a_, image_latents=a_, generator=a_, negative_prompt=a_, inpaint_strength=0.7, num_inference_steps=25, output_type="""numpy""", ).images[0] _snake_case : Dict = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
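The slow tests above exercise DiffEdit's three stages: mask generation from source/target prompts, inversion of the input image into latents, then masked denoising toward the target prompt. A condensed outline of that flow, mirroring the test's checkpoint, image URL, and prompts (an untested sketch, not a verified recipe):

# DiffEdit flow condensed from the slow test above: generate_mask ->
# invert -> masked denoise. Assumes a CUDA device for fp16 + offload.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

generator = torch.manual_seed(0)
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit",
                          target_prompt="a bowl of pears", generator=generator)
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image,
                      inpaint_strength=0.7, generator=generator).latents
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
              generator=generator, inpaint_strength=0.7).images[0]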
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
28
1
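The checkpoint-conversion scripts in this file share the same mechanic: build a list of (old, new) key pairs, then pop each entry out of the state dict and reinsert it under the new name. That pattern in isolation:

# State-dict key renaming pattern used by the conversion scripts above:
# pop + reinsert moves each tensor under its new key without copying.
def rename_state_dict(state_dict: dict, rename_pairs: list) -> dict:
    for old_key, new_key in rename_pairs:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict

sd = {"visual_encoder.cls_token": 0, "ln_vision.weight": 1}
pairs = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("ln_vision.weight", "vision_model.post_layernorm.weight"),
]
print(rename_state_dict(sd, pairs))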
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
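The XLNet package __init__ above defers its heavy framework imports behind _LazyModule. A minimal sketch of the same idea with a module-level __getattr__ (PEP 562); the real _LazyModule is more elaborate (module specs, dir() support, TYPE_CHECKING integration):

# Lazy package __init__ sketch: a submodule is imported only when one of
# its exported names is first accessed on the package.
import importlib

_import_structure = {"tokenization_xlnet": ["XLNetTokenizer"]}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name: str):
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")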
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
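# Hedged usage sketch (not from the source): the positional call below mirrors
# the CLI entry point above; all paths are hypothetical placeholders. Run it
# from within this script (or import the function) after downloading the
# original MobileViTV2 checkpoint and YAML config.
convert_mobilevitva_checkpoint(
    "imagenet1k_256",                   # --task (one of the choices listed above)
    "checkpoints/mobilevitv2-1.0.pt",   # --orig_checkpoint_path (hypothetical)
    "configs/mobilevitv2-1.0.yaml",     # --orig_config_path (hypothetical)
    "dump/mobilevitv2-imagenet1k-256",  # --pytorch_dump_folder_path (hypothetical)
)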
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise TypeError("""Input value must be an 'int' type""" ) _snake_case : Dict = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
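# Why the generated dummies matter at runtime: importing a class still works
# when a backend is missing, but any use raises a clear error. A simplified
# sketch of the metaclass pattern (the real requires_backends checks backend
# availability; here we unconditionally raise for illustration):
class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the 'torch' backend.")

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

try:
    FakeClass.from_pretrained("some/checkpoint")  # hypothetical checkpoint id
except ImportError as err:
    print(err)  # FakeClass requires the 'torch' backend.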
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker A_ = '''CompVis/stable-diffusion-v1-1''' A_ = '''CompVis/stable-diffusion-v1-2''' A_ = '''CompVis/stable-diffusion-v1-3''' A_ = '''CompVis/stable-diffusion-v1-4''' class lowercase( __a ): '''simple docstring''' def __init__( self: Union[str, Any], a_: AutoencoderKL, a_: CLIPTextModel, a_: CLIPTokenizer, a_: UNetaDConditionModel, a_: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], a_: StableDiffusionSafetyChecker, a_: CLIPImageProcessor, a_: bool = True, ): '''simple docstring''' super()._init_() _snake_case : Tuple = StableDiffusionPipeline.from_pretrained(a_ ) _snake_case : int = StableDiffusionPipeline.from_pretrained(a_ ) _snake_case : Any = StableDiffusionPipeline.from_pretrained(a_ ) _snake_case : Any = StableDiffusionPipeline( vae=a_, text_encoder=a_, tokenizer=a_, unet=a_, scheduler=a_, safety_checker=a_, feature_extractor=a_, requires_safety_checker=a_, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return {k: getattr(self, a_ ) for k in self.config.keys() if not k.startswith("""_""" )} def UpperCamelCase_ ( self: str, a_: Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _snake_case : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' self.enable_attention_slicing(a_ ) @torch.no_grad() def UpperCamelCase_ ( self: Union[str, Any], a_: Union[str, List[str]], a_: int = 512, a_: int = 512, a_: int = 50, a_: float = 7.5, a_: Optional[Union[str, List[str]]] = None, a_: Optional[int] = 1, a_: float = 0.0, a_: Optional[torch.Generator] = None, a_: Optional[torch.FloatTensor] = None, a_: Optional[str] = "pil", a_: bool = True, a_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_: int = 1, **a_: List[str], ): '''simple docstring''' return self.pipea( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) @torch.no_grad() def UpperCamelCase_ ( self: List[Any], a_: Union[str, List[str]], a_: int = 512, a_: int = 512, a_: int = 50, a_: float = 7.5, a_: Optional[Union[str, List[str]]] = None, a_: Optional[int] = 1, a_: float = 0.0, a_: Optional[torch.Generator] = None, a_: Optional[torch.FloatTensor] = None, a_: Optional[str] = "pil", a_: bool = True, a_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_: int = 1, **a_: Any, ): '''simple docstring''' return self.pipea( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) @torch.no_grad() def 
UpperCamelCase_ ( self: Dict, a_: Union[str, List[str]], a_: int = 512, a_: int = 512, a_: int = 50, a_: float = 7.5, a_: Optional[Union[str, List[str]]] = None, a_: Optional[int] = 1, a_: float = 0.0, a_: Optional[torch.Generator] = None, a_: Optional[torch.FloatTensor] = None, a_: Optional[str] = "pil", a_: bool = True, a_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_: int = 1, **a_: Optional[int], ): '''simple docstring''' return self.pipea( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) @torch.no_grad() def UpperCamelCase_ ( self: Any, a_: Union[str, List[str]], a_: int = 512, a_: int = 512, a_: int = 50, a_: float = 7.5, a_: Optional[Union[str, List[str]]] = None, a_: Optional[int] = 1, a_: float = 0.0, a_: Optional[torch.Generator] = None, a_: Optional[torch.FloatTensor] = None, a_: Optional[str] = "pil", a_: bool = True, a_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_: int = 1, **a_: Union[str, Any], ): '''simple docstring''' return self.pipea( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) @torch.no_grad() def UpperCamelCase_ ( self: str, a_: Union[str, List[str]], a_: int = 512, a_: int = 512, a_: int = 50, a_: float = 7.5, a_: Optional[Union[str, List[str]]] = None, a_: Optional[int] = 1, a_: float = 0.0, a_: Optional[torch.Generator] = None, a_: Optional[torch.FloatTensor] = None, a_: Optional[str] = "pil", a_: bool = True, a_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_: int = 1, **a_: int, ): '''simple docstring''' _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(a_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." 
) # Get first result from Stable Diffusion Checkpoint v1.1 _snake_case : Dict = self.textaimg_sda_a( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) # Get first result from Stable Diffusion Checkpoint v1.2 _snake_case : str = self.textaimg_sda_a( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) # Get first result from Stable Diffusion Checkpoint v1.3 _snake_case : Optional[int] = self.textaimg_sda_a( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) # Get first result from Stable Diffusion Checkpoint v1.4 _snake_case : Optional[int] = self.textaimg_sda_a( prompt=a_, height=a_, width=a_, num_inference_steps=a_, guidance_scale=a_, negative_prompt=a_, num_images_per_prompt=a_, eta=a_, generator=a_, latents=a_, output_type=a_, return_dict=a_, callback=a_, callback_steps=a_, **a_, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
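# Hedged sketch of what the pipeline above automates: running one prompt,
# with a fixed seed so the comparison is fair, through each of the four
# v1.x checkpoints. The manual loop below is illustrative, not the class's
# own API, and needs a CUDA device plus access to the checkpoints.
import torch
from diffusers import StableDiffusionPipeline

prompt = "a photo of an astronaut riding a horse"
images = []
for repo in (
    "CompVis/stable-diffusion-v1-1",
    "CompVis/stable-diffusion-v1-2",
    "CompVis/stable-diffusion-v1-3",
    "CompVis/stable-diffusion-v1-4",
):
    pipe = StableDiffusionPipeline.from_pretrained(repo, torch_dtype=torch.floataa if False else torch.float16).to("cuda")
    generator = torch.Generator("cuda").manual_seed(0)  # same seed for every checkpoint
    images.append(pipe(prompt, generator=generator).images[0])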
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration A_ = HfArgumentParser(InitializationArguments) A_ = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization A_ = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks A_ = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) A_ = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config A_ = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
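# Standalone re-sketch of the eager/graph toggle implemented by the
# benchmark's run_with_tf_optimizations decorator above (jit_compile is the
# modern name for the experimental_compile flag the source passes to
# tf.function; the source additionally forbids eager mode combined with XLA):
import tensorflow as tf

def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def wrap(func):
        if do_eager_mode:
            return func  # plain eager execution
        return tf.function(func, jit_compile=use_xla)  # traced graph, optionally XLA-compiled
    return wrap

@run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
def square(x):
    return x * x

print(square(tf.constant(3.0)))  # tf.Tensor(9.0, shape=(), dtype=float32)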
"""simple docstring""" import datasets from .evaluate import evaluate A_ = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' A_ = ''' This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' A_ = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ), codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""], reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""], ) def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: int ): '''simple docstring''' _snake_case : int = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} _snake_case : Tuple = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] _snake_case : int = evaluate(dataset=a_, predictions=a_ ) return score
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : float , snake_case__ : float , snake_case__ : float ): """simple docstring""" if (voltage, current, resistance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance < 0: raise ValueError("""Resistance cannot be negative""" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
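# Minimal inference mirror of the classification integration test above
# (downloads the checkpoint on first use; the expected top class index is
# 281, as asserted in the test):
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").eval()
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
predicted_class_idx = logits.argmax(-1).item()
print(predicted_class_idx)                          # 281
print(model.config.id2label[predicted_class_idx])   # human-readable label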
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase: '''simple docstring''' def __init__( self: Union[str, Any], a_: Optional[Any], a_: Any=13, a_: str=7, a_: Optional[Any]=True, a_: Tuple=True, a_: Dict=True, a_: int=True, a_: Optional[int]=99, a_: int=32, a_: List[str]=5, a_: int=4, a_: Tuple=37, a_: Dict="gelu", a_: Tuple=0.1, a_: List[Any]=0.1, a_: str=512, a_: Optional[int]=16, a_: Any=2, a_: Any=0.02, a_: Dict=3, a_: Optional[Any]=4, a_: Dict=None, ): '''simple docstring''' _snake_case : Any = parent _snake_case : int = batch_size _snake_case : List[Any] = seq_length _snake_case : List[str] = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : Union[str, Any] = use_token_type_ids _snake_case : Union[str, Any] = use_labels _snake_case : int = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : List[Any] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : List[str] = hidden_act _snake_case : List[Any] = hidden_dropout_prob _snake_case : Any = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : Any = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Dict = initializer_range _snake_case : Any = num_labels _snake_case : Dict = num_choices _snake_case : str = scope def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : Optional[int] = None if self.use_input_mask: _snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : Optional[int] = None if self.use_token_type_ids: _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) _snake_case : Dict = None _snake_case : int = None _snake_case : int = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) _snake_case : Any = ids_tensor([self.batch_size], self.num_choices ) _snake_case : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=a_, 
initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Any, a_: Dict, a_: Dict, a_: Any, a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : Tuple = NystromformerModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_, attention_mask=a_, token_type_ids=a_ ) _snake_case : str = model(a_, token_type_ids=a_ ) _snake_case : int = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: Any, a_: int, a_: str, a_: Any, a_: List[Any], a_: Union[str, Any], a_: Union[str, Any], a_: str ): '''simple docstring''' _snake_case : int = NystromformerForMaskedLM(config=a_ ) model.to(a_ ) model.eval() _snake_case : str = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self: Optional[Any], a_: List[Any], a_: Any, a_: int, a_: Union[str, Any], a_: List[str], a_: Any, a_: Tuple ): '''simple docstring''' _snake_case : Tuple = NystromformerForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model( a_, attention_mask=a_, token_type_ids=a_, start_positions=a_, end_positions=a_, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Any, a_: Optional[int], a_: Tuple, a_: List[Any], a_: Dict, a_: Optional[int], a_: Any ): '''simple docstring''' _snake_case : int = self.num_labels _snake_case : str = NystromformerForSequenceClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Dict, a_: Dict, a_: List[Any], a_: List[Any], a_: str, a_: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.num_labels _snake_case : List[Any] = NystromformerForTokenClassification(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: str, a_: Optional[int], a_: Any, a_: Optional[Any], a_: int, a_: Dict ): '''simple docstring''' _snake_case : Any = self.num_choices _snake_case : Dict = NystromformerForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : int = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() _snake_case : int = model( a_, attention_mask=a_, token_type_ids=a_, labels=a_, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": 
input_mask} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : int = NystromformerModelTester(self ) _snake_case : str = ConfigTester(self, config_class=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case : Any = type self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = NystromformerModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) _snake_case : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _snake_case : Optional[Any] = model(a_ )[0] _snake_case : int = torch.Size((1, 6, 768) ) self.assertEqual(output.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : List[Any] = """the [MASK] of Belgium is Brussels""" _snake_case : Optional[Any] = 
AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) _snake_case : Optional[Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) _snake_case : Optional[Any] = tokenizer(a_, return_tensors="""pt""" ) with torch.no_grad(): _snake_case : List[Any] = model(encoding.input_ids ).logits _snake_case : Optional[int] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(a_ ), """capital""" )
28
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
28
1
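# A minimal sketch of the bare denoising loop the scheduler tests above keep
# re-running; assumes `diffusers` is installed, and the constant 0.1 residual
# stands in for a real model's predicted noise:
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample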
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = LongformerTokenizer lowercase__ = True lowercase__ = LongformerTokenizerFast lowercase__ = True def UpperCamelCase_ ( self: Any ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _snake_case : List[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _snake_case : List[str] = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _snake_case : Any = {"""unk_token""": """<unk>"""} _snake_case : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp: fp.write(json.dumps(a_ ) + """\n""" ) with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a_ ) ) def UpperCamelCase_ ( self: Union[str, Any], **a_: List[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: str, **a_: str ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : int = """lower newer""" _snake_case : Optional[int] = """lower newer""" return input_text, output_text def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Any = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) _snake_case : int = """lower newer""" _snake_case : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _snake_case : int = tokenizer.tokenize(a_ ) # , add_prefix_space=True) self.assertListEqual(a_, a_ ) _snake_case : Any = tokens + [tokenizer.unk_token] _snake_case : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : int = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""", add_special_tokens=a_ ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""", add_special_tokens=a_ ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) _snake_case : Union[str, Any] = tokenizer.encode("""sequence builders""", add_special_tokens=a_ ) _snake_case : Tuple = tokenizer.encode("""multi-sequence build""", add_special_tokens=a_ ) _snake_case : Tuple = tokenizer.encode( """sequence builders""", add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : Optional[Any] = tokenizer.encode( """sequence builders""", """multi-sequence build""", add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(a_ ) _snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_, a_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizer() _snake_case : Dict = """Encode this sequence.""" _snake_case : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments _snake_case : Union[str, Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(a_, a_ ) _snake_case : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(a_, a_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) _snake_case : Union[str, Any] = tokenizer.encode(a_, add_special_tokens=a_ ) _snake_case : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(a_, a_ ) # Testing spaces after special tokens _snake_case : List[str] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(a_, lstrip=a_, rstrip=a_ )} ) # mask token has a left space _snake_case : str = tokenizer.convert_tokens_to_ids(a_ ) _snake_case : int = """Encode <mask> sequence""" _snake_case : Tuple = """Encode <mask>sequence""" _snake_case : List[str] = tokenizer.encode(a_ ) _snake_case : Optional[int] = encoded.index(a_ ) _snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(a_, a_ ) _snake_case : Union[str, Any] = tokenizer.encode(a_ ) _snake_case : Optional[int] = encoded.index(a_ ) _snake_case : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(a_, a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : str = self.rust_tokenizer_class.from_pretrained(a_, **a_ ) _snake_case : Dict = self.tokenizer_class.from_pretrained(a_, **a_ ) _snake_case : List[str] = """A, <mask> AllenNLP sentence.""" _snake_case : Tuple = tokenizer_r.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ ) _snake_case : Optional[int] = tokenizer_p.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ), sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length 
should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ), sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ), ) _snake_case : int = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _snake_case : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): _snake_case : Any = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _snake_case : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""], a_ ) self.assertEqual(post_processor_state["""add_prefix_space"""], a_ ) self.assertEqual(post_processor_state["""trim_offsets"""], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` _snake_case : int = f"{text_of_1_token} {text_of_1_token}" _snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Dict = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), ) _snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : str = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), ) _snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Dict = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (len(a_ ), len(a_ ) + 1 + len(a_ )), ) _snake_case : Tuple = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Union[str, Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (len(a_ ), len(a_ ) + 1 + len(a_ )), ) _snake_case : Tuple = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, 
use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Union[str, Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), ) _snake_case : Tuple = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Any = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )), ) _snake_case : List[str] = self.rust_tokenizer_class.from_pretrained( a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ ) _snake_case : Optional[Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(a_ )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )), )
28
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
28
1
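# A self-contained brute-force cross-check of the Project Euler 46 search in
# the row above (Goldbach's other conjecture: every odd composite is a prime
# plus twice a square); the 10_000 bound is an assumption that comfortably
# covers the known answer, 5777:
import math

def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % p for p in range(2, int(math.sqrt(n)) + 1))

def _smallest_counterexample(limit: int = 10_000):
    for n in range(9, limit, 2):  # odd numbers only
        if _is_prime(n):
            continue  # only odd composites are in scope
        if not any(_is_prime(n - 2 * k * k) for k in range(1, int(math.sqrt(n // 2)) + 1)):
            return n
    return None

assert _smallest_counterexample() == 5_777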
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class lowercase( nn.Module ): '''simple docstring''' def __init__( self: str ): '''simple docstring''' super().__init__() _snake_case : Dict = nn.Linear(3, 4 ) _snake_case : int = nn.BatchNormad(4 ) _snake_case : Tuple = nn.Linear(4, 5 ) def UpperCamelCase_ ( self: Dict, a_: int ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(a_ ) ) ) class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, model.state_dict() ) _snake_case : str = os.path.join(a_, """index.json""" ) self.assertTrue(os.path.isfile(a_ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: _snake_case : int = os.path.join(a_, f"{key}.dat" ) self.assertTrue(os.path.isfile(a_ ) ) # TODO: add tests on the fact weights are properly loaded def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[Any] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: _snake_case : Any = torch.randn(2, 3, dtype=a_ ) with TemporaryDirectory() as tmp_dir: _snake_case : List[Any] = offload_weight(a_, """weight""", a_, {} ) _snake_case : str = os.path.join(a_, """weight.dat""" ) self.assertTrue(os.path.isfile(a_ ) ) self.assertDictEqual(a_, {"""weight""": {"""shape""": [2, 3], """dtype""": str(a_ ).split(""".""" )[1]}} ) _snake_case : List[str] = load_offloaded_weight(a_, index["""weight"""] ) self.assertTrue(torch.equal(a_, a_ ) ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = ModelForTest() _snake_case : int = model.state_dict() _snake_case : str = {k: v for k, v in state_dict.items() if """linear2""" not in k} _snake_case : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) _snake_case : Tuple = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) _snake_case : Dict = {k: v for k, v in state_dict.items() if """weight""" in k} _snake_case : str = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) _snake_case : Optional[int] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) # Duplicates are removed _snake_case : Union[str, Any] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Tuple = {"""a.1""": 0, """a.10""": 1, """a.2""": 2} 
_snake_case : Optional[Any] = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] ) self.assertDictEqual(a_, {"""a.1""": 0, """a.2""": 2} ) _snake_case : List[Any] = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2} _snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] ) self.assertDictEqual(a_, {"""a.1.a""": 0, """a.2.a""": 2} )
28
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
28
1
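# A minimal round-trip sketch of the accelerate disk-offloading helpers the
# test row above exercises; assumes `accelerate` and `torch` are installed:
import torch.nn as nn
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

model = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5))
state_dict = model.state_dict()
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)  # writes one .dat per tensor plus index.json
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)
    assert sorted(loader) == sorted(state_dict)  # same keys, tensors lazily loaded from disk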
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model'''} A_ = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } A_ = { '''AI-Sweden/gpt-sw3-126m''': 20_48, '''AI-Sweden/gpt-sw3-350m''': 20_48, '''AI-Sweden/gpt-sw3-1.6b''': 20_48, '''AI-Sweden/gpt-sw3-6.7b''': 20_48, '''AI-Sweden/gpt-sw3-20b''': 20_48, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self: Dict, a_: str, a_: Union[str, Any]=False, a_: Any=False, a_: Tuple=False, a_: Tuple=None, a_: str=None, a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Dict[str, Any]] = None, **a_: Union[str, Any], ): '''simple docstring''' _snake_case : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs _snake_case : Tuple = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) _snake_case : str = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _snake_case : Tuple = """<|endoftext|>""" if eos_token is None else eos_token _snake_case : str = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _snake_case : List[Any] = unk_token if pad_token is None else pad_token _snake_case : Union[str, Any] = eos_token if bos_token is None else bos_token else: _snake_case : str = """<pad>""" if pad_token is None else pad_token _snake_case : List[str] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=a_, remove_space=a_, keep_accents=a_, bos_token=a_, eos_token=a_, unk_token=a_, pad_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, ) _snake_case : Dict = do_lower_case _snake_case : Any = remove_space _snake_case : Optional[int] = keep_accents _snake_case : Optional[int] = vocab_file _snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a_ ) # Used for whitespace normalization in input texts # fmt : off _snake_case : str = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing _snake_case : Union[str, Any] = re.compile( f"[{''.join(map(a_, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8_203] ) )}]" ) def __getstate__( self: Any ): '''simple docstring''' _snake_case : Tuple = self.__dict__.copy() _snake_case : int = None return state def __setstate__( self: Any, a_: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self, """sp_model_kwargs""" ): _snake_case : Union[str, Any] = {} _snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return len(self.sp_model ) def UpperCamelCase_ ( self: Dict, a_: str ): '''simple docstring''' _snake_case : Dict = self.non_printing_characters_re.sub("""""", a_ ) # Normalize whitespaces _snake_case : Any = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization _snake_case : Optional[Any] = unicodedata.normalize("""NFC""", a_ ) return text def UpperCamelCase_ ( self: str, a_: str, **a_: Union[str, Any] ): '''simple docstring''' _snake_case : Any = self.preprocess_text(a_ ) return self.sp_model.encode(a_, out_type=a_ ) def UpperCamelCase_ ( self: List[Any], a_: str ): '''simple docstring''' return self.sp_model.PieceToId(a_ ) def UpperCamelCase_ ( self: Tuple, a_: int ): '''simple docstring''' return self.sp_model.IdToPiece(a_ ) @staticmethod def UpperCamelCase_ ( a_: str ): '''simple docstring''' return out_string def UpperCamelCase_ ( self: Tuple, a_: List[str] ): '''simple docstring''' _snake_case : List[str] = [] _snake_case : Optional[Any] = """""" _snake_case : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a_ ) + token _snake_case : int = True _snake_case : int = [] else: current_sub_tokens.append(a_ ) _snake_case : Union[str, Any] = False out_string += self.sp_model.decode(a_ ) return out_string def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : Optional[Any] = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_, """wb""" ) as fi: _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,) def UpperCamelCase_ ( self: Any, a_: Union[str, List[str]], a_: Union[str, bool] = False ): '''simple docstring''' if isinstance(a_, a_ ): _snake_case : str = self.preprocess_text(a_ ) _snake_case : Optional[Any] = 
self.sp_model.encode(a_ ) else: _snake_case : int = [self.preprocess_text(a_ ) for t in text] _snake_case : List[Any] = self.sp_model.encode(a_ ) if return_tensors is True or return_tensors == "pt": _snake_case : List[str] = torch.tensor(a_ ) return token_ids def UpperCamelCase_ ( self: Optional[int], a_: Union[int, List[int]] ): '''simple docstring''' return self.sp_model.decode(a_ ) def UpperCamelCase_ ( self: Tuple, a_: "Conversation" ): '''simple docstring''' _snake_case : Union[str, Any] = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()] _snake_case : List[str] = ( f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(a_ ) + f"{self.bos_token}Bot:" ) return self.encode(text=a_ )
28
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
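# A quick sanity check of the Kernighan bit count in the row above; `popcount`
# is a local restatement of the same loop, not a library function:
def popcount(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        count += 1
    return count

assert popcount(0) == 0
assert popcount(0b1011) == 3
assert popcount(255) == 8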
"""simple docstring""" import csv import tweepy # Twitter API credentials A_ = '''''' A_ = '''''' A_ = '''''' A_ = '''''' def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : str = tweepy.OAuthHandler(snake_case__ , snake_case__ ) auth.set_access_token(snake_case__ , snake_case__ ) _snake_case : Optional[int] = tweepy.API(snake_case__ ) # initialize a list to hold all the tweepy Tweets _snake_case : Union[str, Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) _snake_case : List[str] = api.user_timeline(screen_name=snake_case__ , count=2_00 ) # save most recent tweets alltweets.extend(snake_case__ ) # save the id of the oldest tweet less one _snake_case : Optional[Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(snake_case__ ) > 0: print(F"getting tweets before {oldest}" ) # all subsequent requests use the max_id param to prevent duplicates _snake_case : List[Any] = api.user_timeline( screen_name=snake_case__ , count=2_00 , max_id=snake_case__ ) # save most recent tweets alltweets.extend(snake_case__ ) # update the id of the oldest tweet less one _snake_case : Optional[int] = alltweets[-1].id - 1 print(F"...{len(snake_case__ )} tweets downloaded so far" ) # transform the tweepy tweets into a 2D array that will populate the csv _snake_case : Optional[int] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"new_{screen_name}_tweets.csv" , """w""" ) as f: _snake_case : List[str] = csv.writer(snake_case__ ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(snake_case__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
28
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
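The row above exercises ConvNextV2 image classification end to end in its slow integration test. As a standalone illustration (not part of the dataset rows; it assumes the transformers, torch, and Pillow packages plus access to the checkpoint and fixture image named in the test), the same inference pattern looks like this:

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

# Checkpoint id and fixture path are taken from the test above.
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for this ImageNet-1k checkpoint
print(model.config.id2label[int(logits.argmax(-1))])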
"""simple docstring""" import itertools import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = 2 while True: if is_prime(snake_case__ ): yield num num += 1 def UpperCAmelCase__ (snake_case__ : int = 1_00_01 ): """simple docstring""" return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) ) if __name__ == "__main__": print(F'''{solution() = }''')
28
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
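The prime-generator row above leans on the fact that every prime greater than 3 has the form 6k +/- 1 (every other candidate is divisible by 2 or 3, so trial division may skip it). A minimal self-check of that shortcut against a brute-force primality definition, illustrative only and over a small range:

import math

def is_prime(number: int) -> bool:
    # 6k +/- 1 trial division, as in the generator row above.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def is_prime_naive(number: int) -> bool:
    # Brute-force reference: check every possible divisor.
    return number > 1 and all(number % d for d in range(2, number))

assert all(is_prime(n) == is_prime_naive(n) for n in range(500))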
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''transformers''') A_ = ''' {0} = None ''' A_ = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' A_ = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" ) self.assertIsNone(a_ ) _snake_case : List[Any] = find_backend(""" if not is_tokenizers_available():""" ) self.assertEqual(a_, """tokenizers""" ) _snake_case : Optional[int] = find_backend(""" if not is_tensorflow_text_available():""" ) self.assertEqual(a_, """tensorflow_text""" ) _snake_case : List[Any] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" ) self.assertEqual(a_, """sentencepiece_and_tokenizers""" ) _snake_case : int = find_backend( """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" ) self.assertEqual(a_, """sentencepiece_and_tensorflow_text""" ) _snake_case : int = find_backend( """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" ) self.assertEqual(a_, """sentencepiece_and_tokenizers_and_vision""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : str = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""tensorflow_text""", a_ ) self.assertIn("""sentencepiece_and_tokenizers""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""BertModel""", objects["""torch"""] ) self.assertIn("""TFBertModel""", objects["""tf"""] ) self.assertIn("""FlaxBertModel""", objects["""flax"""] ) self.assertIn("""BertModel""", objects["""torch"""] ) self.assertIn("""TFBertTokenizer""", objects["""tensorflow_text"""] ) self.assertIn("""convert_slow_tokenizer""", objects["""sentencepiece_and_tokenizers"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : List[str] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : str = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ _snake_case : List[str] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Tuple = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ _snake_case : Tuple = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
28
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
1
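The check_dummies row above asserts on placeholder classes declared with `metaclass=DummyObject`. A rough sketch of how such a metaclass can work (an assumption for illustration, not transformers' exact implementation): any public attribute access on the placeholder class raises an ImportError naming the missing backends.

class DummyObject(type):
    # Intercept class-level attribute access; dunder/private lookups pass through
    # so the class itself stays introspectable.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires backends: {cls._backends}")

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

try:
    FakeClass.from_pretrained("anything")  # attribute access raises before the call
except ImportError as err:
    print(err)  # FakeClass requires backends: ['torch']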
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = MvpTokenizer lowercase__ = MvpTokenizerFast lowercase__ = True lowercase__ = filter_roberta_detectors def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _snake_case : Optional[int] = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _snake_case : Optional[int] = {"""unk_token""": """<unk>"""} _snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp: fp.write(json.dumps(a_ ) + """\n""" ) with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a_ ) ) def UpperCamelCase_ ( self: Union[str, Any], **a_: Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], **a_: List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: List[Any], a_: int ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" ) @cached_property def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" ) @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : List[Any] = tokenizer(a_, max_length=len(a_ ), padding=a_, return_tensors="""pt""" ) self.assertIsInstance(a_, a_ ) self.assertEqual((2, 9), batch.input_ids.shape ) self.assertEqual((2, 9), batch.attention_mask.shape ) _snake_case : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(a_, a_ ) # Test that special tokens are reset @require_torch def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Dict = tokenizer(a_, padding=a_, return_tensors="""pt""" ) # check if input_ids are returned and no labels self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) 
self.assertNotIn("""labels""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) @require_torch def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : str = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : int = tokenizer(text_target=a_, max_length=32, padding="""max_length""", return_tensors="""pt""" ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) @require_torch def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Optional[Any] = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""], padding=a_, truncation=a_, return_tensors="""pt""" ) self.assertIsInstance(a_, a_ ) self.assertEqual(batch.input_ids.shape, (2, 1_024) ) @require_torch def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = ["""A long paragraph for summarization."""] _snake_case : Tuple = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Dict = tokenizer(a_, text_target=a_, return_tensors="""pt""" ) _snake_case : List[str] = inputs["""input_ids"""] _snake_case : List[str] = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : Any = self.rust_tokenizer_class.from_pretrained(a_, **a_ ) _snake_case : Any = self.tokenizer_class.from_pretrained(a_, **a_ ) _snake_case : int = """A, <mask> AllenNLP sentence.""" _snake_case : List[Any] = tokenizer_r.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ ) _snake_case : List[str] = tokenizer_p.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ), sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ), sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ), ) _snake_case : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _snake_case : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
28
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
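The Project Euler row above brute-forces a repeating-key XOR cipher. A tiny round-trip demo of that scheme with a made-up key and message: XOR with the same cycled key both encrypts and decrypts, since (c ^ k) ^ k == c.

from itertools import cycle

message = "the quick brown fox"
key = [ord(c) for c in "god"]  # hypothetical 3-letter lowercase key

# Encrypt by XOR-ing each character code with the repeating key ...
cipher = [ord(c) ^ k for c, k in zip(message, cycle(key))]
# ... and decrypt by applying the identical operation again.
decoded = "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))
assert decoded == message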
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING A_ = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowercase( __a ): '''simple docstring''' def __init__( self: Any, **a_: Tuple ): '''simple docstring''' super().__init__(**a_ ) requires_backends(self, """vision""" ) requires_backends(self, """torch""" ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) self.check_model_type(a_ ) def UpperCamelCase_ ( self: List[Any], **a_: str ): '''simple docstring''' _snake_case : Optional[Any] = {} _snake_case : Tuple = {} _snake_case : Tuple = {} # preprocess args if "points_per_batch" in kwargs: _snake_case : Union[str, Any] = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: _snake_case : str = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: _snake_case : int = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: _snake_case : Optional[Any] = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: _snake_case : str = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: _snake_case : Dict = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: _snake_case : Optional[Any] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: _snake_case : Optional[Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: _snake_case : Optional[int] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: _snake_case : Any = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: _snake_case : Optional[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: _snake_case : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self: List[str], a_: Optional[int], *a_: Union[str, Any], a_: int=None, a_: Union[str, Any]=None, **a_: Optional[Any] ): '''simple docstring''' return super().__call__(a_, *a_, num_workers=a_, batch_size=a_, **a_ ) def UpperCamelCase_ ( self: Tuple, a_: Any, a_: Any=64, a_: int = 0, a_: float = 512 / 1_500, a_: Optional[int] = 32, a_: Optional[int] = 1, ): '''simple docstring''' _snake_case : List[str] = load_image(a_ ) _snake_case : str = self.image_processor.size["""longest_edge"""] _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.image_processor.generate_crop_boxes( a_, a_, a_, a_, a_, a_ ) _snake_case : int = self.image_processor(images=a_, return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": _snake_case : Tuple = self.get_inference_context() with inference_context(): _snake_case : Optional[Any] = self._ensure_tensor_on_device(a_, device=self.device ) _snake_case : str = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) _snake_case : str = image_embeddings _snake_case : List[str] = grid_points.shape[1] _snake_case : Optional[int] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0, a_, a_ ): _snake_case : Optional[Any] = grid_points[:, i : i + points_per_batch, :, :] _snake_case : str = input_labels[:, i : i + points_per_batch] _snake_case : int = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCamelCase_ ( self: str, a_: str, a_: Optional[Any]=0.88, a_: Optional[Any]=0.95, a_: Dict=0, a_: Any=1, ): '''simple docstring''' _snake_case : Any = model_inputs.pop("""input_boxes""" ) _snake_case : Dict = model_inputs.pop("""is_last""" ) _snake_case : Optional[Any] = model_inputs.pop("""original_sizes""" ).tolist() _snake_case : Optional[Any] = model_inputs.pop("""reshaped_input_sizes""" ).tolist() _snake_case : List[Any] = self.model(**a_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks _snake_case : Optional[int] = model_outputs["""pred_masks"""] _snake_case : Tuple = self.image_processor.post_process_masks( a_, a_, a_, a_, binarize=a_ ) _snake_case : Optional[Any] = model_outputs["""iou_scores"""] _snake_case , _snake_case , _snake_case : Union[str, Any] = self.image_processor.filter_masks( masks[0], iou_scores[0], original_sizes[0], input_boxes[0], a_, a_, a_, a_, ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Optional[int]=False, a_: int=False, a_: Tuple=0.7, ): '''simple docstring''' _snake_case : Tuple = [] _snake_case : List[Any] = [] _snake_case : Union[str, Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) _snake_case : List[Any] = torch.cat(a_ ) _snake_case : int = torch.cat(a_ ) _snake_case , _snake_case , _snake_case , _snake_case : Any = self.image_processor.post_process_for_mask_generation( a_, a_, a_, a_ ) _snake_case : List[str] = defaultdict(a_ ) for output in model_outputs: for k, v in output.items(): extra[k].append(a_ ) _snake_case : str = {} if output_rle_mask: _snake_case : List[str] = rle_mask if output_bboxes_mask: _snake_case : Union[str, Any] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
28
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
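The mask-generation pipeline row above yields the point grid in fixed-size slices of points_per_batch and flags the final slice so postprocessing knows when to aggregate. A stripped-down sketch of that chunking pattern, with hypothetical names (the real pipeline slices tensors and carries extra model inputs alongside each batch):

from typing import Iterator, Sequence, Tuple

def iter_batches(points: Sequence, points_per_batch: int) -> Iterator[Tuple[Sequence, bool]]:
    # Walk a flat sequence in fixed-size slices, marking the last one.
    if points_per_batch <= 0:
        raise ValueError("points_per_batch must be >= 1")
    for i in range(0, len(points), points_per_batch):
        batch = points[i : i + points_per_batch]
        yield batch, i + points_per_batch >= len(points)

for batch, is_last in iter_batches(list(range(10)), 4):
    print(batch, is_last)  # [0..3] False, [4..7] False, [8, 9] True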
"""simple docstring""" import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP A_ = False try: A_ = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class lowercase: '''simple docstring''' def __init__( self: Dict, a_: str = None, a_: list = [] ): '''simple docstring''' _snake_case : Optional[int] = 0 _snake_case : int = choices _snake_case : Optional[int] = prompt if sys.platform == "win32": _snake_case : Any = """*""" else: _snake_case : List[str] = """➔ """ def UpperCamelCase_ ( self: List[str], a_: Optional[int], a_: str = "" ): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index], 32, a_ ) else: forceWrite(self.choices[index], a_ ) def UpperCamelCase_ ( self: List[str], a_: int ): '''simple docstring''' if index == self.position: forceWrite(f" {self.arrow_char} " ) self.write_choice(a_ ) else: forceWrite(f" {self.choices[index]}" ) reset_cursor() def UpperCamelCase_ ( self: Optional[Any], a_: Direction, a_: int = 1 ): '''simple docstring''' _snake_case : Union[str, Any] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(a_ ) move_cursor(a_, direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' move_cursor(len(self.choices ) - self.position, """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' move_cursor(len(self.choices ) - self.position, """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(a_ )] for number in range(10 )] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Any = int(chr(self.current_selection ) ) _snake_case : Optional[Any] = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP, -movement ) elif self.position < index: self.move_direction(Direction.DOWN, a_ ) else: return else: return def UpperCamelCase_ ( self: Tuple, a_: int = 0 ): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt, """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""", """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""", """\n""" ) _snake_case : str = default_choice for i in range(len(self.choices ) ): self.print_choice(a_ ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position, """UP""" ) with cursor.hide(): while True: if in_colab: try: _snake_case : Optional[Any] = int(builtins.input() ) except ValueError: _snake_case : List[str] = default_choice else: _snake_case : int = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1, """UP""" ) clear_line() self.write_choice(a_, """\n""" ) return choice
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
28
1
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 A_ = get_tests_dir('''fixtures''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = mock.Mock() _snake_case : Optional[int] = 500 _snake_case : Union[str, Any] = {} _snake_case : Tuple = HTTPError _snake_case : Any = {} # Download this model to make sure it's in the cache. _snake_case : Tuple = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""", return_value=a_ ) as mock_head: _snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" ) @is_staging_test class lowercase( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = TOKEN HfFolder.save_token(a_ ) @classmethod def UpperCamelCase_ ( cls: Tuple ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="""test-feature-extractor""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""valid_org/test-feature-extractor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""test-dynamic-feature-extractor""" ) except HTTPError: pass def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub("""test-feature-extractor""", use_auth_token=self._token ) _snake_case : Tuple = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""test-feature-extractor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_, repo_id="""test-feature-extractor""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : str = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Tuple = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub("""valid_org/test-feature-extractor""", use_auth_token=self._token ) _snake_case : Any = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, 
repo_id="""valid_org/test-feature-extractor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_, repo_id="""valid_org/test-feature-extractor-org""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() _snake_case : int = CustomFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub("""test-dynamic-feature-extractor""", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""}, ) _snake_case : List[Any] = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor", trust_remote_code=a_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, """CustomFeatureExtractor""" )
28
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
28
1
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ReformerTokenizer lowercase__ = ReformerTokenizerFast lowercase__ = True lowercase__ = False lowercase__ = True def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' super().setUp() _snake_case : Any = ReformerTokenizer(a_, keep_accents=a_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = """<s>""" _snake_case : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], """<unk>""" ) self.assertEqual(vocab_keys[1], """<s>""" ) self.assertEqual(vocab_keys[-1], """j""" ) self.assertEqual(len(a_ ), 1_000 ) def UpperCamelCase_ ( self: int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_000 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' if not self.test_rust_tokenizer: return _snake_case : Union[str, Any] = self.get_tokenizer() _snake_case : Union[str, Any] = self.get_rust_tokenizer() _snake_case : Tuple = """I was born in 92000, and this is falsé.""" _snake_case : List[Any] = tokenizer.tokenize(a_ ) _snake_case : Optional[int] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) _snake_case : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_ ) _snake_case : Dict = rust_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) _snake_case : Tuple = self.get_rust_tokenizer() _snake_case : List[str] = tokenizer.encode(a_ ) _snake_case : List[str] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: str=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : int = self.rust_tokenizer_class.from_pretrained(a_, **a_ ) # Simple input _snake_case : List[Any] = """This is a simple input""" _snake_case : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""] _snake_case : Union[str, Any] = ("""This is a simple input""", """This is a pair""") _snake_case : int = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises( a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) # Pair input self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises( 
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = ReformerTokenizer(a_, keep_accents=a_ ) _snake_case : List[str] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382], ) _snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ], ) _snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) _snake_case : List[str] = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ], ) @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = """Hello World!""" _snake_case : Tuple = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) _snake_case : List[Any] = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) ) @require_torch @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence _snake_case : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] _snake_case : Tuple = """ """.join(a_ ) _snake_case : Union[str, Any] = self.big_tokenizer.encode_plus(a_, return_tensors="""pt""" ) _snake_case : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="""pt""" ) _snake_case : List[str] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) _snake_case : Optional[Any] = encoded_sequence["""input_ids"""].shape _snake_case : Dict = ReformerModel(a_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a_ ) model(**a_ ) @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Dict = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 _snake_case : Tuple = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=a_, model_name="""google/reformer-crime-and-punishment""", revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""", padding=a_, sequences=a_, )
28
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
28
1
"""simple docstring""" A_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : str ): """simple docstring""" _snake_case : Any = [False] * len(snake_case__ ) _snake_case : Tuple = [s] _snake_case : Tuple = True while queue: _snake_case : int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(snake_case__ ) _snake_case : Dict = True _snake_case : Union[str, Any] = u return visited[t] def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): """simple docstring""" _snake_case : Optional[Any] = [-1] * (len(snake_case__ )) _snake_case : Tuple = 0 _snake_case : Optional[Any] = [] _snake_case : Union[str, Any] = [i[:] for i in graph] # Record original cut, copy. while bfs(snake_case__ , snake_case__ , snake_case__ , snake_case__ ): _snake_case : int = float("""Inf""" ) _snake_case : Union[str, Any] = sink while s != source: # Find the minimum value in select path _snake_case : int = min(snake_case__ , graph[parent[s]][s] ) _snake_case : Union[str, Any] = parent[s] max_flow += path_flow _snake_case : Optional[Any] = sink while v != source: _snake_case : Tuple = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _snake_case : str = parent[v] for i in range(len(snake_case__ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
28
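The row above stores the algorithm with obfuscated identifiers, so it does not run as written. As a rough guide to what it computes, here is a minimal readable sketch of the same BFS-based max-flow/min-cut routine (Edmonds-Karp style); every name below is my own reconstruction, not taken from the sample, and the graph is the 6-node test matrix shown above.

# Readable sketch of the min-cut routine above; all identifiers are illustrative.
from __future__ import annotations


def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    # Breadth-first search over residual capacities; records the augmenting
    # path in `parent` and reports whether the sink is reachable.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    parent = [-1] * len(graph)
    temp = [row[:] for row in graph]  # keep the original capacities
    while bfs(graph, source, sink, parent):
        path_flow = float("inf")
        s = sink
        while s != source:  # bottleneck capacity on the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        v = sink
        while v != source:  # update residual capacities along the path
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges that were saturated to zero but had capacity originally form the cut.
    return [
        (i, j)
        for i in range(len(graph))
        for j in range(len(graph[0]))
        if graph[i][j] == 0 and temp[i][j] > 0
    ]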
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
28
1
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
28
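As context for the row above: it searches for odd composites that cannot be written as a prime plus twice a square (Project Euler problem 46, whose well-known answer is 5777). A small sanity-check sketch of that decomposition follows; `is_prime` and `has_decomposition` are readable stand-ins for the obfuscated helpers, not names from the sample.

# Hedged check of the prime + 2*i*i decomposition the sample searches for.
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def has_decomposition(n: int) -> bool:
    # Can the odd composite n be written as prime + 2 * i * i?
    i = 1
    while 2 * i * i <= n:
        if is_prime(n - 2 * i * i):
            return True
        i += 1
    return False


assert has_decomposition(9)          # 9 = 7 + 2 * 1**2
assert has_decomposition(33)         # 33 = 31 + 2 * 1**2
assert not has_decomposition(5777)   # the Project Euler 46 counterexample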
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
1
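For reference, a readable sketch of the mode helper above; the names are illustrative, but the logic mirrors the sample exactly: count every element, take the highest count, and return the sorted set of elements that reach it.

# Readable sketch of the mode computation above (names are illustrative).
def modes(values: list) -> list:
    if not values:
        return []
    counts = [values.count(v) for v in values]
    top = max(counts)  # highest frequency in the list
    return sorted({values[i] for i, c in enumerate(counts) if c == top})


print(modes([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]))  # -> [2]
print(modes([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2]))        # -> [2, 4]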
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" return x + 2 class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = """x = 3""" _snake_case : str = {} _snake_case : Optional[Any] = evaluate(a_, {}, state=a_ ) assert result == 3 self.assertDictEqual(a_, {"""x""": 3} ) _snake_case : Union[str, Any] = """x = y""" _snake_case : Optional[int] = {"""y""": 5} _snake_case : str = evaluate(a_, {}, state=a_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(a_, {"""x""": 5, """y""": 5} ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Any = """y = add_two(x)""" _snake_case : Optional[Any] = {"""x""": 3} _snake_case : Union[str, Any] = evaluate(a_, {"""add_two""": add_two}, state=a_ ) assert result == 5 self.assertDictEqual(a_, {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: _snake_case : List[Any] = evaluate(a_, {}, state=a_ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = """x = 3""" _snake_case : Dict = {} _snake_case : str = evaluate(a_, {}, state=a_ ) assert result == 3 self.assertDictEqual(a_, {"""x""": 3} ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Any = """test_dict = {'x': x, 'y': add_two(x)}""" _snake_case : Union[str, Any] = {"""x""": 3} _snake_case : Optional[Any] = evaluate(a_, {"""add_two""": add_two}, state=a_ ) self.assertDictEqual(a_, {"""x""": 3, """y""": 5} ) self.assertDictEqual(a_, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[str] = """x = 3\ny = 5""" _snake_case : Dict = {} _snake_case : str = evaluate(a_, {}, state=a_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(a_, {"""x""": 3, """y""": 5} ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = """text = f'This is x: {x}.'""" _snake_case : int = {"""x""": 3} _snake_case : Union[str, Any] = evaluate(a_, {}, state=a_ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(a_, {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5""" _snake_case : Dict = {"""x""": 3} _snake_case : int = evaluate(a_, {}, state=a_ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(a_, {"""x""": 3, """y""": 2} ) _snake_case : Optional[Any] = {"""x""": 8} _snake_case : Dict = evaluate(a_, {}, state=a_ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(a_, {"""x""": 8, """y""": 5} ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = """test_list = [x, add_two(x)]""" _snake_case : Dict = {"""x""": 3} _snake_case : str = evaluate(a_, {"""add_two""": add_two}, state=a_ ) self.assertListEqual(a_, [3, 5] ) self.assertDictEqual(a_, {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[Any] = """y = x""" _snake_case : List[Any] = {"""x""": 3} _snake_case : List[Any] = evaluate(a_, {}, state=a_ ) assert result == 3 self.assertDictEqual(a_, {"""x""": 3, """y""": 3} ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = """test_list = [x, add_two(x)]\ntest_list[1]""" _snake_case : Any = {"""x""": 3} _snake_case : Optional[Any] = evaluate(a_, {"""add_two""": add_two}, state=a_ ) assert result == 5 self.assertDictEqual(a_, {"""x""": 3, """test_list""": [3, 5]} ) _snake_case : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" _snake_case : List[str] = {"""x""": 3} _snake_case : Union[str, Any] = evaluate(a_, {"""add_two""": add_two}, state=a_ ) assert result == 5 self.assertDictEqual(a_, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = """x = 0\nfor i in range(3):\n x = i""" _snake_case : Union[str, Any] = {} _snake_case : List[Any] = evaluate(a_, {"""range""": range}, state=a_ ) assert result == 2 self.assertDictEqual(a_, {"""x""": 2, """i""": 2} )
28
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
28
1
"""simple docstring""" from __future__ import annotations from collections import deque class lowercase: '''simple docstring''' def __init__( self: Any, a_: list[str] ): '''simple docstring''' _snake_case : list[dict] = [] self.adlist.append( {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} ) for keyword in keywords: self.add_keyword(a_ ) self.set_fail_transitions() def UpperCamelCase_ ( self: Optional[int], a_: int, a_: str ): '''simple docstring''' for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' _snake_case : Dict = 0 for character in keyword: _snake_case : str = self.find_next_state(a_, a_ ) if next_state is None: self.adlist.append( { """value""": character, """next_states""": [], """fail_state""": 0, """output""": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) _snake_case : Optional[Any] = len(self.adlist ) - 1 else: _snake_case : List[Any] = next_state self.adlist[current_state]["output"].append(a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : deque = deque() for node in self.adlist[0]["next_states"]: q.append(a_ ) _snake_case : Optional[int] = 0 while q: _snake_case : List[str] = q.popleft() for child in self.adlist[r]["next_states"]: q.append(a_ ) _snake_case : Any = self.adlist[r]["""fail_state"""] while ( self.find_next_state(a_, self.adlist[child]["""value"""] ) is None and state != 0 ): _snake_case : List[Any] = self.adlist[state]["""fail_state"""] _snake_case : Dict = self.find_next_state( a_, self.adlist[child]["""value"""] ) if self.adlist[child]["fail_state"] is None: _snake_case : Optional[Any] = 0 _snake_case : str = ( self.adlist[child]["""output"""] + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""] ) def UpperCamelCase_ ( self: Tuple, a_: str ): '''simple docstring''' _snake_case : dict = {} # returns a dict with keywords and list of its occurrences _snake_case : Any = 0 for i in range(len(a_ ) ): while ( self.find_next_state(a_, string[i] ) is None and current_state != 0 ): _snake_case : List[str] = self.adlist[current_state]["""fail_state"""] _snake_case : List[str] = self.find_next_state(a_, string[i] ) if next_state is None: _snake_case : int = 0 else: _snake_case : Tuple = next_state for key in self.adlist[current_state]["output"]: if key not in result: _snake_case : int = [] result[key].append(i - len(a_ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
28
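As quick orientation for the Aho-Corasick automaton above: with readable identifiers restored, you build it from a keyword list and scan a string, getting back a dict that maps each keyword to the start indices of its matches. The class name `Automaton` and method name `search_in` below are assumptions for illustration; in the sample they are obfuscated.

# Hypothetical usage of the automaton above; `Automaton` and `search_in`
# are assumed names, not taken from the obfuscated sample.
auto = Automaton(["he", "she", "his", "hers"])
print(auto.search_in("ahishers"))
# -> {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}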
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
28
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
28
1