Dataset schema:

    column                   type      range
    -----------------------  --------  -----------------
    code                     string    81 - 54k chars
    code_codestyle           int64     0 - 721
    style_context            string    91 - 41.9k chars
    style_context_codestyle  int64     0 - 699
    label                    int64     0 - 1
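A minimal sketch of reading rows with this schema via the `datasets` library, assuming the dump comes from a dataset hosted on the Hugging Face Hub; the dataset id below is a placeholder, not the real repository name:

from datasets import load_dataset

# Hypothetical dataset id -- substitute the actual repository this dump came from.
ds = load_dataset("some-org/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:80])                # flattened source string, 81 to 54k chars
print(row["code_codestyle"])           # integer style id in [0, 721]
print(row["style_context_codestyle"])  # integer style id in [0, 699]
print(row["label"])                    # binary label, 0 or 1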
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowercase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=None , snake_case=True , snake_case=True , snake_case=None , ) -> Any: _UpperCAmelCase = size if size is not None else {'height': 20, 'width': 20} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = do_convert_rgb _UpperCAmelCase = [512, 1024, 2048, 4096] _UpperCAmelCase = patch_size if patch_size is not None else {'height': 16, 'width': 16} def lowerCamelCase_ ( self ) -> Optional[int]: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' _UpperCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', ) @require_torch @require_vision class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = PixaStructImageProcessingTester(self ) @property def lowerCamelCase_ ( self ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case , 'do_convert_rgb' ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase = 2048 _UpperCAmelCase = image_processor(snake_case , return_tensors='pt' , max_patches=snake_case ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def lowerCamelCase_ ( self ) -> List[str]: # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , 
return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( snake_case , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCamelCase_ ( self ) -> Union[str, Any]: # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case ): _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=snake_case ).flattened_patches _UpperCAmelCase = 'Hello' _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=snake_case , header_text=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( snake_case , return_tensors='pt' , max_patches=snake_case , header_text=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCamelCase_ ( self ) -> Union[str, Any]: # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , np.ndarray ) _UpperCAmelCase = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( snake_case , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCamelCase_ ( self ) -> int: # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=snake_case 
).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( snake_case , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', ) @require_torch @require_vision class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase = 3 @property def lowerCamelCase_ ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case , 'do_convert_rgb' ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( snake_case , return_tensors='pt' , max_patches=snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
code_codestyle: 24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
style_context_codestyle: 24
label: 1
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def UpperCAmelCase ( A : Tuple , A : List[str]=False ): '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value lowercase = parse_flag_from_env('''RUN_SLOW''', default=False) lowercase = parse_flag_from_env('''RUN_REMOTE''', default=False) lowercase = parse_flag_from_env('''RUN_LOCAL''', default=True) lowercase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression lowercase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') lowercase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') lowercase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio lowercase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam lowercase = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility lowercase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows lowercase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def UpperCAmelCase ( A : Optional[Any] ): '''simple docstring''' try: import faiss # noqa except ImportError: _UpperCAmelCase = unittest.skip('test requires faiss' )(A ) return test_case def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' try: import regex # noqa except ImportError: _UpperCAmelCase = unittest.skip('test requires regex' )(A ) return test_case def UpperCAmelCase ( A : List[str] ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: _UpperCAmelCase = unittest.skip('test requires elasticsearch' )(A ) return test_case def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: _UpperCAmelCase = unittest.skip('test requires sqlalchemy' )(A ) return test_case def UpperCAmelCase ( A : str ): '''simple docstring''' if not config.TORCH_AVAILABLE: _UpperCAmelCase = unittest.skip('test requires PyTorch' )(A ) return test_case def UpperCAmelCase ( A : Tuple ): '''simple docstring''' if not config.TF_AVAILABLE: _UpperCAmelCase = unittest.skip('test requires TensorFlow' )(A ) return test_case def UpperCAmelCase ( A : Optional[Any] ): '''simple docstring''' if not config.JAX_AVAILABLE: 
_UpperCAmelCase = unittest.skip('test requires JAX' )(A ) return test_case def UpperCAmelCase ( A : str ): '''simple docstring''' if not config.PIL_AVAILABLE: _UpperCAmelCase = unittest.skip('test requires Pillow' )(A ) return test_case def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(A ) else: return test_case def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(A ) else: return test_case def UpperCAmelCase ( A : Any ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(A ) else: return test_case def UpperCAmelCase ( A : Any ): '''simple docstring''' def _require_spacy_model(A : Dict ): try: import spacy # noqa F401 spacy.load(A ) except ImportError: return unittest.skip('test requires spacy' )(A ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(A ) )(A ) else: return test_case return _require_spacy_model def UpperCAmelCase ( A : str ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(A ) else: return test_case def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(A ) else: return test_case def UpperCAmelCase ( A : List[str] ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: _UpperCAmelCase = unittest.skip('test is slow' )(A ) return test_case def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: _UpperCAmelCase = unittest.skip('test is local' )(A ) return test_case def UpperCAmelCase ( A : Dict ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: _UpperCAmelCase = unittest.skip('test is packaged' )(A ) return test_case def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: _UpperCAmelCase = unittest.skip('test requires remote' )(A ) return test_case def UpperCAmelCase ( *A : List[Any] ): '''simple docstring''' def decorate(cls : Tuple ): for name, fn in cls.__dict__.items(): if callable(A ) and name.startswith('test' ): for decorator in decorators: _UpperCAmelCase = decorator(A ) setattr(cls , A , A ) return cls return decorate class lowercase__ ( A ): '''simple docstring''' pass class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 1 _UpperCAmelCase = 2 @contextmanager def UpperCAmelCase ( A : Optional[Any]=OfflineSimulationMode.CONNECTION_FAILS , A : Optional[int]=1e-16 ): '''simple docstring''' _UpperCAmelCase = requests.Session().request def timeout_request(A : str , A : Tuple , A : List[Any] , **A : List[str] ): # Change the url to an invalid url so that the connection hangs _UpperCAmelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' 
) _UpperCAmelCase = timeout try: return online_request(A , A , **A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier _UpperCAmelCase = url _UpperCAmelCase = e.args[0] _UpperCAmelCase = (max_retry_error.args[0].replace('10.255.255.1' , f'OfflineMock[{url}]' ),) _UpperCAmelCase = (max_retry_error,) raise def raise_connection_error(A : Dict , A : int , **A : Optional[int] ): raise requests.ConnectionError('Offline mode is enabled.' , request=A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , A ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def UpperCAmelCase ( *A : List[str] , **A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*A , **A ) as tmp_dir: try: os.chdir(A ) yield finally: os.chdir(A ) @contextmanager def UpperCAmelCase ( ): '''simple docstring''' import gc gc.collect() _UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def UpperCAmelCase ( ): '''simple docstring''' import gc gc.collect() _UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def UpperCAmelCase ( A : Optional[Any] , A : str ): '''simple docstring''' return deepcopy(A ).integers(0 , 100 , 10 ).tolist() == deepcopy(A ).integers(0 , 100 , 10 ).tolist() def UpperCAmelCase ( A : int ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(A : str , *A : Dict , **A : List[str] ): try: return func(*A , **A ) except HTTPError as err: if str(A ).startswith('500' ) or str(A ).startswith('502' ): pytest.xfail(str(A ) ) raise err return decorator.decorator(_wrapper , A ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def UpperCAmelCase ( A : List[str] , A : int ): '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(A ) else: break async def UpperCAmelCase ( A : str , A : Tuple=None , A : List[str]=None , A : Dict=None , A : int=False , A : Dict=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(A ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(A : Optional[int] , A : Optional[Any] , A : Optional[int] , A : Optional[Any]="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(A ) if not quiet: print(A , A , file=A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda A : tee(A , A , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda A : tee(A , A , sys.stderr , label='stderr:' ) ), ] , timeout=A , ) return _RunOutput(await p.wait() , A , A ) def UpperCAmelCase ( A : Tuple , A : Any=None , A : Dict=None , A : Optional[int]=180 , A : str=False , A : Dict=True ): '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(A , env=A , stdin=A , timeout=A , quiet=A , echo=A ) ) _UpperCAmelCase = ' '.join(A ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'\'{cmd_str}\' produced no output.' ) return result def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) _UpperCAmelCase = re.sub(r'^gw' , '' , A , 0 , re.M ) return int(A ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = 2_9500 _UpperCAmelCase = pytest_xdist_worker_id() return port + uniq_delta
code_codestyle: 24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
style_context_codestyle: 24
label: 1
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCAmelCase ( A : Optional[int] , A : List[str] , A : int ): '''simple docstring''' if gpta_config_file == "": _UpperCAmelCase = GPTaConfig() else: _UpperCAmelCase = GPTaConfig.from_json_file(A ) _UpperCAmelCase = GPTaModel(A ) # Load weights from numpy load_tf_weights_in_gpta(A , A , A ) # Save pytorch-model _UpperCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME _UpperCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , A ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(A , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) lowercase = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
code_codestyle: 24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
style_context_codestyle: 24
label: 1
"""simple docstring""" lowercase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} lowercase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def UpperCAmelCase ( A : dict[int, list[int]] , A : int , A : list[bool] ): '''simple docstring''' _UpperCAmelCase = True _UpperCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(A , A , A ) order.append(A ) return order def UpperCAmelCase ( A : dict[int, list[int]] , A : int , A : list[bool] ): '''simple docstring''' _UpperCAmelCase = True _UpperCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(A , A , A ) return component def UpperCAmelCase ( A : dict[int, list[int]] ): '''simple docstring''' _UpperCAmelCase = len(A ) * [False] _UpperCAmelCase = {vert: [] for vert in range(len(A ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(A ) _UpperCAmelCase = [] for i, was_visited in enumerate(A ): if not was_visited: order += topology_sort(A , A , A ) _UpperCAmelCase = [] _UpperCAmelCase = len(A ) * [False] for i in range(len(A ) ): _UpperCAmelCase = order[len(A ) - i - 1] if not visited[vert]: _UpperCAmelCase = find_components(A , A , A ) components_list.append(A ) return components_list
code_codestyle: 24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
style_context_codestyle: 24
label: 1
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=False , snake_case=True , snake_case=False , snake_case=False , snake_case=19 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = EsmForProteinFolding(config=snake_case ).float() model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , attention_mask=snake_case ) _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = model(snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, 
self.seq_length, 7, 2) ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = (EsmForProteinFolding,) if is_torch_available() else () _UpperCAmelCase = () _UpperCAmelCase = {} if is_torch_available() else {} _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = EsmFoldModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Dict: self.config_tester.run_common_tests() def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip('Does not support attention outputs' ) def lowerCamelCase_ ( self ) -> Tuple: pass @unittest.skip def lowerCamelCase_ ( self ) -> List[str]: pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCamelCase_ ( self ) -> Tuple: pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowerCamelCase_ ( self ) -> Tuple: pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCamelCase_ ( self ) -> Optional[int]: pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCamelCase_ ( self ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCamelCase_ ( self ) -> List[Any]: pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowerCamelCase_ ( self ) -> List[str]: pass @unittest.skip('ESMFold only has one output format.' ) def lowerCamelCase_ ( self ) -> List[str]: pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowerCamelCase_ ( self ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support input chunking.' ) def lowerCamelCase_ ( self ) -> Optional[Any]: pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCamelCase_ ( self ) -> List[str]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCamelCase_ ( self ) -> Any: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCamelCase_ ( self ) -> List[Any]: pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowerCamelCase_ ( self ) -> Optional[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass @require_torch class lowercase__ ( A ): '''simple docstring''' @slow def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() _UpperCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _UpperCAmelCase = model(snake_case )['positions'] _UpperCAmelCase = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , snake_case , atol=1E-4 ) )
code_codestyle: 24
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
style_context_codestyle: 24
label: 1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=False , snake_case=True , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , ) -> List[str]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def lowerCamelCase_ ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DonutImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = DonutImageProcessingTester(self ) @property def lowerCamelCase_ ( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , 'do_resize' ) ) self.assertTrue(hasattr(snake_case , 'size' ) ) self.assertTrue(hasattr(snake_case , 'do_thumbnail' ) ) self.assertTrue(hasattr(snake_case , 'do_align_long_axis' ) ) self.assertTrue(hasattr(snake_case , 'do_pad' ) ) self.assertTrue(hasattr(snake_case , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case , 'image_mean' ) ) self.assertTrue(hasattr(snake_case , 'image_std' ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def lowerCamelCase_ ( self ) -> Any: pass @is_flaky() def lowerCamelCase_ ( self ) -> Optional[Any]: # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , 
return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def lowerCamelCase_ ( self ) -> List[Any]: # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def lowerCamelCase_ ( self ) -> Dict: # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
code_codestyle: 24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 24
label: 1
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
code_codestyle: 24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 24
label: 1
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase = logging.getLogger(__name__) lowercase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field( default=A, metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) }, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )}, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field( default=A, metadata={'''help''': '''The input training data file (a text file).'''} ) _UpperCAmelCase = field( default=A, metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
''' '''Very often splitting large files to smaller files can prevent tokenizer going out of memory''' ) }, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''}, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''}, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''}, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} ) _UpperCAmelCase = field(default=A, metadata={'''help''': '''Whether ot not to use whole word mask.'''} ) _UpperCAmelCase = field( default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) _UpperCAmelCase = field( default=1 / 6, metadata={ '''help''': ( '''Ratio of length of a span of masked tokens to surrounding context length for permutation language''' ''' modeling.''' ) }, ) _UpperCAmelCase = field( default=5, metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} ) _UpperCAmelCase = field( default=-1, metadata={ '''help''': ( '''Optional input sequence length after tokenization.''' '''The training dataset will be truncated in block of this size for training.''' '''Default to the model max input length for single sentence inputs (take into account special tokens).''' ) }, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ): '''simple docstring''' def _dataset(A : List[Any] , A : Union[str, Any]=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , ) return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size ) else: return TextDataset( tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: _UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: _UpperCAmelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) _UpperCAmelCase = AutoModelWithLMHead.from_config(A ) model.resize_token_embeddings(len(A ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' 
) if data_args.block_size <= 0: _UpperCAmelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: _UpperCAmelCase = min(data_args.block_size , tokenizer.max_len ) # Get datasets _UpperCAmelCase = ( get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) _UpperCAmelCase = ( get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": _UpperCAmelCase = DataCollatorForPermutationLanguageModeling( tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: _UpperCAmelCase = DataCollatorForWholeWordMask( tokenizer=A , mlm_probability=data_args.mlm_probability ) else: _UpperCAmelCase = DataCollatorForLanguageModeling( tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _UpperCAmelCase = Trainer( model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , ) # Training if training_args.do_train: _UpperCAmelCase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=A ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = math.exp(eval_output['eval_loss'] ) _UpperCAmelCase = {'perplexity': perplexity} _UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , A , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(A ) return results def UpperCAmelCase ( A : int ): '''simple docstring''' main() if __name__ == "__main__": main()
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : list[int] , A : int ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = 0 _UpperCAmelCase = sum(A ) create_state_space_tree(A , A , A , A , A , A ) return result def UpperCAmelCase ( A : list[int] , A : int , A : int , A : list[int] , A : list[list[int]] , A : int , ): '''simple docstring''' if sum(A ) > max_sum or (remaining_nums_sum + sum(A )) < max_sum: return if sum(A ) == max_sum: result.append(A ) return for index in range(A , len(A ) ): create_state_space_tree( A , A , index + 1 , [*path, nums[index]] , A , remaining_nums_sum - nums[index] , ) lowercase = [3, 34, 4, 12, 5, 2] lowercase = 9 lowercase = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" import argparse import os import re lowercase = '''src/transformers/models/auto''' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict lowercase = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''') # re pattern that matches identifiers in mappings lowercase = re.compile(r'''\s*\(\s*"(\S[^"]+)"''') def UpperCAmelCase ( A : List[str] , A : bool = False ): '''simple docstring''' with open(A , 'r' , encoding='utf-8' ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase = content.split('\n' ) _UpperCAmelCase = [] _UpperCAmelCase = 0 while line_idx < len(A ): if _re_intro_mapping.search(lines[line_idx] ) is not None: _UpperCAmelCase = len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(' ' * indent + '(' ): new_lines.append(lines[line_idx] ) line_idx += 1 _UpperCAmelCase = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": _UpperCAmelCase = line_idx while not lines[line_idx].startswith(' ' * indent + ')' ): line_idx += 1 blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers _UpperCAmelCase = sorted(A , key=lambda A : _re_identifier.search(A ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(A ) ) elif "\n".join(A ) != content: return True def UpperCAmelCase ( A : bool = False ): '''simple docstring''' _UpperCAmelCase = [os.path.join(A , A ) for f in os.listdir(A ) if f.endswith('.py' )] _UpperCAmelCase = [sort_auto_mapping(A , overwrite=A ) for fname in fnames] if not overwrite and any(A ): _UpperCAmelCase = [f for f, d in zip(A , A ) if d] raise ValueError( f'The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix' ' this.' ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowercase = parser.parse_args() sort_all_auto_mappings(not args.check_only)
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" def UpperCAmelCase ( A : str ): '''simple docstring''' return " ".join( ''.join(word[::-1] ) if len(A ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , **snake_case ) -> List[str]: requires_backends(self , ['bs4'] ) super().__init__(**snake_case ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _UpperCAmelCase = parent.find_all(child.name , recursive=snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(snake_case ) else next(i for i, s in enumerate(snake_case , 1 ) if s is child ) ) _UpperCAmelCase = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = BeautifulSoup(snake_case , 'html.parser' ) _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for element in html_code.descendants: if type(snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _UpperCAmelCase = html.unescape(snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(snake_case ) _UpperCAmelCase , _UpperCAmelCase = self.xpath_soup(snake_case ) stringaxtag_seq.append(snake_case ) stringaxsubs_seq.append(snake_case ) if len(snake_case ) != len(snake_case ): raise ValueError('Number of doc strings and xtags does not correspond' ) if len(snake_case ) != len(snake_case ): raise ValueError('Number of doc strings and xsubs does not correspond' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[str]: _UpperCAmelCase = '' for tagname, subs in zip(snake_case , snake_case ): xpath += f'/{tagname}' if subs != 0: xpath += f'[{subs}]' return xpath def __call__( self , snake_case ) -> BatchFeature: _UpperCAmelCase = False # Check that strings has a valid type if isinstance(snake_case , snake_case ): _UpperCAmelCase = True elif isinstance(snake_case , (list, tuple) ): if len(snake_case ) == 0 or isinstance(html_strings[0] , snake_case ): _UpperCAmelCase = True if not valid_strings: raise ValueError( 'HTML strings must of type `str`, `List[str]` (batch of examples), ' f'but is of type {type(snake_case )}.' ) _UpperCAmelCase = bool(isinstance(snake_case , (list, tuple) ) and (isinstance(html_strings[0] , snake_case )) ) if not is_batched: _UpperCAmelCase = [html_strings] # Get nodes + xpaths _UpperCAmelCase = [] _UpperCAmelCase = [] for html_string in html_strings: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.get_three_from_single(snake_case ) nodes.append(snake_case ) _UpperCAmelCase = [] for node, tag_list, sub_list in zip(snake_case , snake_case , snake_case ): _UpperCAmelCase = self.construct_xpath(snake_case , snake_case ) xpath_strings.append(snake_case ) xpaths.append(snake_case ) # return as Dict _UpperCAmelCase = {'nodes': nodes, 'xpaths': xpaths} _UpperCAmelCase = BatchFeature(data=snake_case , tensor_type=snake_case ) return encoded_inputs
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = SwinConfig() _UpperCAmelCase = swin_name.split('_' ) _UpperCAmelCase = name_split[1] _UpperCAmelCase = int(name_split[4] ) _UpperCAmelCase = int(name_split[3][-1] ) if model_size == "tiny": _UpperCAmelCase = 96 _UpperCAmelCase = (2, 2, 6, 2) _UpperCAmelCase = (3, 6, 12, 24) elif model_size == "small": _UpperCAmelCase = 96 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (3, 6, 12, 24) elif model_size == "base": _UpperCAmelCase = 128 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (4, 8, 16, 32) else: _UpperCAmelCase = 192 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (6, 12, 24, 48) if "in22k" in swin_name: _UpperCAmelCase = 2_1841 else: _UpperCAmelCase = 1000 _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(A ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = img_size _UpperCAmelCase = num_classes _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size return config def UpperCAmelCase ( A : List[str] ): '''simple docstring''' if "patch_embed.proj" in name: _UpperCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _UpperCAmelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: _UpperCAmelCase = 'encoder.' + name if "attn.proj" in name: _UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: _UpperCAmelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: _UpperCAmelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _UpperCAmelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' ) if name == "norm.weight": _UpperCAmelCase = 'layernorm.weight' if name == "norm.bias": _UpperCAmelCase = 'layernorm.bias' if "head" in name: _UpperCAmelCase = name.replace('head' , 'classifier' ) else: _UpperCAmelCase = 'swin.' + name return name def UpperCAmelCase ( A : Dict , A : Tuple ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _UpperCAmelCase = orig_state_dict.pop(A ) if "mask" in key: continue elif "qkv" in key: _UpperCAmelCase = key.split('.' 
) _UpperCAmelCase = int(key_split[1] ) _UpperCAmelCase = int(key_split[3] ) _UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _UpperCAmelCase = val[:dim, :] _UpperCAmelCase = val[ dim : dim * 2, : ] _UpperCAmelCase = val[-dim:, :] else: _UpperCAmelCase = val[ :dim ] _UpperCAmelCase = val[ dim : dim * 2 ] _UpperCAmelCase = val[ -dim: ] else: _UpperCAmelCase = val return orig_state_dict def UpperCAmelCase ( A : Optional[Any] , A : List[str] ): '''simple docstring''' _UpperCAmelCase = timm.create_model(A , pretrained=A ) timm_model.eval() _UpperCAmelCase = get_swin_config(A ) _UpperCAmelCase = SwinForImageClassification(A ) model.eval() _UpperCAmelCase = convert_state_dict(timm_model.state_dict() , A ) model.load_state_dict(A ) _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) ) _UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw ) _UpperCAmelCase = image_processor(images=A , return_tensors='pt' ) _UpperCAmelCase = timm_model(inputs['pixel_values'] ) _UpperCAmelCase = model(**A ).logits assert torch.allclose(A , A , atol=1e-3 ) print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(A ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowercase = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for 
word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
1
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( A : Any , A : int , A : List[Any] ): '''simple docstring''' _UpperCAmelCase = RemBertConfig.from_json_file(A ) print('Building PyTorch model from configuration: {}'.format(str(A ) ) ) _UpperCAmelCase = RemBertModel(A ) # Load weights from tf checkpoint load_tf_weights_in_rembert(A , A , A ) # Save pytorch-model print('Save PyTorch model to {}'.format(A ) ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowercase = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
24
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase = logging.get_logger(__name__) lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } lowercase = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase ( A : Optional[int] , A : Dict , A : List[Any] , A : Union[str, Any] , A : List[Any] , A : List[str] ): '''simple docstring''' for attribute in key.split('.' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models _UpperCAmelCase = 'lm_head' _UpperCAmelCase = getattr(A , A ) if weight_type is not None: _UpperCAmelCase = getattr(A , A ).shape else: _UpperCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _UpperCAmelCase = value elif weight_type == "weight_g": _UpperCAmelCase = value elif weight_type == "weight_v": _UpperCAmelCase = value elif weight_type == "bias": _UpperCAmelCase = value else: _UpperCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def UpperCAmelCase ( A : str , A : Dict , A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = fairseq_model.state_dict() _UpperCAmelCase = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): _UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) _UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): _UpperCAmelCase = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _UpperCAmelCase = True if "*" in mapped_key: _UpperCAmelCase = name.split(A )[0].split('.' 
)[-2] _UpperCAmelCase = mapped_key.replace('*' , A ) if "weight_g" in name: _UpperCAmelCase = 'weight_g' elif "weight_v" in name: _UpperCAmelCase = 'weight_v' elif "bias" in name: _UpperCAmelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCAmelCase = 'weight' else: _UpperCAmelCase = None set_recursively(A , A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(f'Unused weights: {unused_weights}' ) def UpperCAmelCase ( A : Union[str, Any] , A : List[Any] , A : Union[str, Any] , A : Optional[int] , A : int ): '''simple docstring''' _UpperCAmelCase = full_name.split('conv_layers.' )[-1] _UpperCAmelCase = name.split('.' ) _UpperCAmelCase = int(items[0] ) _UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(A ) @torch.no_grad() def UpperCAmelCase ( A : str , A : List[str] , A : Any=None , A : Any=None , A : Dict=True ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = UniSpeechConfig.from_pretrained(A ) else: _UpperCAmelCase = UniSpeechConfig() if is_finetuned: if dict_path: _UpperCAmelCase = Dictionary.load_from_json(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.eos_index _UpperCAmelCase = len(target_dict.symbols ) _UpperCAmelCase = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) _UpperCAmelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCAmelCase = 42 _UpperCAmelCase = 43 with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A , A ) _UpperCAmelCase = WavaVecaPhonemeCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) _UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) _UpperCAmelCase = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) _UpperCAmelCase = UniSpeechForCTC(A ) else: _UpperCAmelCase = UniSpeechForPreTraining(A ) if is_finetuned: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _UpperCAmelCase = model[0].eval() recursively_load_weights(A , A , A ) hf_unispeech.save_pretrained(A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowercase = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) _UpperCAmelCase = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) sd_pipe.set_scheduler('sample_euler' ) _UpperCAmelCase = 'A painting of a squirrel eating a burger' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sd_pipe([prompt] , generator=snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) _UpperCAmelCase = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) sd_pipe.set_scheduler('sample_euler' ) _UpperCAmelCase = 'A painting of a squirrel eating a burger' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sd_pipe([prompt] , generator=snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) _UpperCAmelCase = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) _UpperCAmelCase = 'A painting of a squirrel eating a burger' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sd_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=snake_case , ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
24
1
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowercase = NewType('''DataClass''', Any) lowercase = NewType('''DataClassType''', Any) def UpperCAmelCase ( A : List[str] ): '''simple docstring''' if isinstance(A , A ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def UpperCAmelCase ( A : list ): '''simple docstring''' _UpperCAmelCase = {str(A ): choice for choice in choices} return lambda A : str_to_choice.get(A , A ) def UpperCAmelCase ( *, A : Union[str, List[str]] = None , A : str = None , A : Any = dataclasses.MISSING , A : Callable[[], Any] = dataclasses.MISSING , A : dict = None , **A : List[str] , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _UpperCAmelCase = {} if aliases is not None: _UpperCAmelCase = aliases if help is not None: _UpperCAmelCase = help return dataclasses.field(metadata=A , default=A , default_factory=A , **A ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = 42 def __init__( self , snake_case , **snake_case ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: _UpperCAmelCase = ArgumentDefaultsHelpFormatter super().__init__(**snake_case ) if dataclasses.is_dataclass(snake_case ): _UpperCAmelCase = [dataclass_types] _UpperCAmelCase = list(snake_case ) for dtype in self.dataclass_types: self._add_dataclass_arguments(snake_case ) @staticmethod def lowerCamelCase_ ( snake_case , snake_case ) -> Union[str, Any]: _UpperCAmelCase = f'--{field.name}' _UpperCAmelCase = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , snake_case ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) _UpperCAmelCase = kwargs.pop('aliases' , [] ) if isinstance(snake_case , snake_case ): _UpperCAmelCase = [aliases] _UpperCAmelCase = getattr(field.type , '__origin__' , field.type ) if origin_type is Union or (hasattr(snake_case , 'UnionType' ) and isinstance(snake_case , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(snake_case ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' f' Problem encountered in field \'{field.name}\'.' 
) if type(snake_case ) not in field.type.__args__: # filter `str` in Union _UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _UpperCAmelCase = getattr(field.type , '__origin__' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _UpperCAmelCase = ( field.type.__args__[0] if isinstance(snake_case , field.type.__args__[1] ) else field.type.__args__[1] ) _UpperCAmelCase = getattr(field.type , '__origin__' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _UpperCAmelCase = {} if origin_type is Literal or (isinstance(field.type , snake_case ) and issubclass(field.type , snake_case )): if origin_type is Literal: _UpperCAmelCase = field.type.__args__ else: _UpperCAmelCase = [x.value for x in field.type] _UpperCAmelCase = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: _UpperCAmelCase = field.default else: _UpperCAmelCase = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _UpperCAmelCase = copy(snake_case ) # Hack because type=bool in argparse does not behave as we want. _UpperCAmelCase = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _UpperCAmelCase = default # This tells argparse we accept 0 or 1 value after --field_name _UpperCAmelCase = '?' # This is the value that will get picked if we do --field_name (without value) _UpperCAmelCase = True elif isclass(snake_case ) and issubclass(snake_case , snake_case ): _UpperCAmelCase = field.type.__args__[0] _UpperCAmelCase = '+' if field.default_factory is not dataclasses.MISSING: _UpperCAmelCase = field.default_factory() elif field.default is dataclasses.MISSING: _UpperCAmelCase = True else: _UpperCAmelCase = field.type if field.default is not dataclasses.MISSING: _UpperCAmelCase = field.default elif field.default_factory is not dataclasses.MISSING: _UpperCAmelCase = field.default_factory() else: _UpperCAmelCase = True parser.add_argument(snake_case , *snake_case , **snake_case ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): _UpperCAmelCase = False parser.add_argument(f'--no_{field.name}' , action='store_false' , dest=field.name , **snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Tuple: if hasattr(snake_case , '_argument_group_name' ): _UpperCAmelCase = self.add_argument_group(dtype._argument_group_name ) else: _UpperCAmelCase = self try: _UpperCAmelCase = get_type_hints(snake_case ) except NameError: raise RuntimeError( f'Type resolution failed for {dtype}. 
Try declaring the class in global scope or ' 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(snake_case ): _UpperCAmelCase = '.'.join(map(snake_case , sys.version_info[:3] ) ) raise RuntimeError( f'Type resolution failed for {dtype} on Python {python_version}. Try removing ' 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(snake_case ): if not field.init: continue _UpperCAmelCase = type_hints[field.name] self._parse_dataclass_field(snake_case , snake_case ) def lowerCamelCase_ ( self , snake_case=None , snake_case=False , snake_case=True , snake_case=None , snake_case=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _UpperCAmelCase = [] if args_filename: args_files.append(Path(snake_case ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _UpperCAmelCase = ArgumentParser() args_file_parser.add_argument(snake_case , type=snake_case , action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) _UpperCAmelCase , _UpperCAmelCase = args_file_parser.parse_known_args(args=snake_case ) _UpperCAmelCase = vars(snake_case ).get(args_file_flag.lstrip('-' ) , snake_case ) if cmd_args_file_paths: args_files.extend([Path(snake_case ) for p in cmd_args_file_paths] ) _UpperCAmelCase = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:] _UpperCAmelCase , _UpperCAmelCase = self.parse_known_args(args=snake_case ) _UpperCAmelCase = [] for dtype in self.dataclass_types: _UpperCAmelCase = {f.name for f in dataclasses.fields(snake_case ) if f.init} _UpperCAmelCase = {k: v for k, v in vars(snake_case ).items() if k in keys} for k in keys: delattr(snake_case , snake_case ) _UpperCAmelCase = dtype(**snake_case ) outputs.append(snake_case ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(snake_case ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def lowerCamelCase_ ( self , snake_case , snake_case = False ) -> Tuple[DataClass, ...]: _UpperCAmelCase = set(args.keys() ) _UpperCAmelCase = [] for dtype in self.dataclass_types: _UpperCAmelCase = {f.name for f in dataclasses.fields(snake_case ) if f.init} _UpperCAmelCase = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _UpperCAmelCase = dtype(**snake_case ) outputs.append(snake_case ) if not allow_extra_keys and unused_keys: raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(snake_case )}' ) return tuple(snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case = False ) -> Tuple[DataClass, ...]: with open(Path(snake_case ) , encoding='utf-8' ) as open_json_file: _UpperCAmelCase = json.loads(open_json_file.read() ) _UpperCAmelCase = self.parse_dict(snake_case , allow_extra_keys=snake_case ) return tuple(snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case = False ) -> Tuple[DataClass, ...]: _UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(snake_case ).read_text() ) , allow_extra_keys=snake_case ) return tuple(snake_case )
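# A minimal usage sketch for the parser implemented above (published upstream as
# `transformers.HfArgumentParser`; class and method names are mangled in this dump).
# The dataclass fields and CLI values below are illustrative assumptions, not taken
# from the original file.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    learning_rate: float = field(default=5e-5, metadata={"help": "initial learning rate"})
    do_eval: bool = False  # bool fields become `--do_eval` flags; a `--no_*` complement is added when the default is True


(example_args,) = HfArgumentParser(ExampleArguments).parse_args_into_dataclasses(
    args=["--learning_rate", "1e-4", "--do_eval"]
)
assert example_args.learning_rate == 1e-4 and example_args.do_eval is True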
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' 
) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
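# A short sketch of the interleaving entry point defined above (exposed upstream as
# `datasets.interleave_datasets`); the toy data is illustrative.
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})
# Sample from both datasets until each has been fully consumed at least once
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted")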
"""simple docstring""" from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowercase = datasets.utils.logging.get_logger(__name__) class lowercase__ ( folder_based_builder.FolderBasedBuilderConfig ): '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = None class lowercase__ ( folder_based_builder.FolderBasedBuilder ): '''simple docstring''' _UpperCAmelCase = datasets.Audio() _UpperCAmelCase = '''audio''' _UpperCAmelCase = AudioFolderConfig _UpperCAmelCase = 42 # definition at the bottom of the script _UpperCAmelCase = AudioClassification(audio_column='''audio''', label_column='''label''' ) lowercase = [ '''.aiff''', '''.au''', '''.avr''', '''.caf''', '''.flac''', '''.htk''', '''.svx''', '''.mat4''', '''.mat5''', '''.mpc2k''', '''.ogg''', '''.paf''', '''.pvf''', '''.raw''', '''.rf64''', '''.sd2''', '''.sds''', '''.ircam''', '''.voc''', '''.w64''', '''.wav''', '''.nist''', '''.wavex''', '''.wve''', '''.xi''', '''.mp3''', '''.opus''', ] lowercase = AUDIO_EXTENSIONS
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , 
do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
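# A minimal sketch of the pipeline exercised by the tests above, reusing the same tiny
# checkpoint the tests load; the prompt is illustrative.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # -> [{"generated_text": ...}]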
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( '''pipelines_utils''', '''0.22.0''', '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''', standard_warn=False, stacklevel=3, )
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
"""simple docstring""" from random import randint, random def UpperCAmelCase ( A : int , A : int , A : int , A : bool = False , A : bool = False , A : int = 5 , ): '''simple docstring''' _UpperCAmelCase = [[-1] * number_of_cells] # Create a highway without any car _UpperCAmelCase = 0 _UpperCAmelCase = max(A , 0 ) while i < number_of_cells: _UpperCAmelCase = ( randint(0 , A ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def UpperCAmelCase ( A : list , A : int ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = highway_now[car_index + 1 :] for cell in range(len(A ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(A , -1 ) def UpperCAmelCase ( A : list , A : float , A : int ): '''simple docstring''' _UpperCAmelCase = len(A ) # Beforce calculations, the highway is empty _UpperCAmelCase = [-1] * number_of_cells for car_index in range(A ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed _UpperCAmelCase = min(highway_now[car_index] + 1 , A ) # Number of empty cell before the next car _UpperCAmelCase = get_distance(A , A ) - 1 # We can't have the car causing an accident _UpperCAmelCase = min(next_highway[car_index] , A ) if random() < probability: # Randomly, a driver will slow down _UpperCAmelCase = max(next_highway[car_index] - 1 , 0 ) return next_highway def UpperCAmelCase ( A : list , A : int , A : float , A : int ): '''simple docstring''' _UpperCAmelCase = len(highway[0] ) for i in range(A ): _UpperCAmelCase = update(highway[i] , A , A ) _UpperCAmelCase = [-1] * number_of_cells for car_index in range(A ): _UpperCAmelCase = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) _UpperCAmelCase = (car_index + speed) % number_of_cells # Commit the change of position _UpperCAmelCase = speed highway.append(A ) return highway if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = GPTaTokenizer _UpperCAmelCase = GPTaTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] _UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) ) _UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case ) ) def lowerCamelCase_ ( self , **snake_case ) -> Dict: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCamelCase_ ( self , **snake_case ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: _UpperCAmelCase = 'lower newer' _UpperCAmelCase = 'lower newer' return input_text, output_text def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] _UpperCAmelCase = tokenizer.tokenize(snake_case , add_prefix_space=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> int: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case ) _UpperCAmelCase = 'lower newer' # Testing tokenization _UpperCAmelCase = tokenizer.tokenize(snake_case , add_prefix_space=snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) # Testing conversion to ids without special tokens _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) # Testing conversion to ids with special tokens _UpperCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_prefix_space=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) # Testing the unknown 
token _UpperCAmelCase = tokens + [rust_tokenizer.unk_token] _UpperCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case ) , snake_case ) def lowerCamelCase_ ( self , *snake_case , **snake_case ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def lowerCamelCase_ ( self , snake_case=15 ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # Simple input _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase = ('This is a simple input', 'This is a pair') _UpperCAmelCase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input looooooooong', 'This is a simple input'] _UpperCAmelCase = ('This is a simple input', 'This is a pair') _UpperCAmelCase = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] _UpperCAmelCase = tokenizer.pad_token_id _UpperCAmelCase = tokenizer(snake_case , padding='max_length' , max_length=30 , return_tensors='np' ) _UpperCAmelCase = tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors='np' ) _UpperCAmelCase = tokenizer(*snake_case , padding='max_length' , max_length=60 , return_tensors='np' ) _UpperCAmelCase = tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in 
out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = '$$$' _UpperCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case , add_bos_token=snake_case ) _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase = tokenizer.bos_token_id _UpperCAmelCase = tokenizer(snake_case ) _UpperCAmelCase = tokenizer(snake_case ) self.assertEqual(out_s.input_ids[0] , snake_case ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCAmelCase = tokenizer.decode(out_s.input_ids ) _UpperCAmelCase = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , snake_case ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def lowerCamelCase_ ( self ) -> Dict: pass def lowerCamelCase_ ( self ) -> Union[str, Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCAmelCase = [self.get_tokenizer(do_lower_case=snake_case , add_bos_token=snake_case )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase = 'Encode this.' _UpperCAmelCase = 'This one too please.' _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) encoded_sequence += tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = tokenizer.encode_plus( snake_case , snake_case , add_special_tokens=snake_case , return_special_tokens_mask=snake_case , ) _UpperCAmelCase = encoded_sequence_dict['input_ids'] _UpperCAmelCase = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(snake_case ) , len(snake_case ) ) _UpperCAmelCase = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(snake_case ) ] _UpperCAmelCase = [x for x in filtered_sequence if x is not None] self.assertEqual(snake_case , snake_case ) @require_tokenizers class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCAmelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=snake_case ) _UpperCAmelCase = 'A photo of a cat' _UpperCAmelCase = tokenizer.encode( snake_case , ) self.assertEqual(snake_case , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) _UpperCAmelCase = AutoTokenizer.from_pretrained('./test_opt' ) _UpperCAmelCase = tokenizer.encode( snake_case , ) self.assertEqual(snake_case , [2, 250, 1345, 9, 10, 4758] ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=snake_case ) _UpperCAmelCase = 'A photo of a cat' _UpperCAmelCase = tokenizer.encode( snake_case , ) # Same as above self.assertEqual(snake_case , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = 
AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=snake_case ) _UpperCAmelCase = 'bos' _UpperCAmelCase = tokenizer.get_vocab()['bos'] _UpperCAmelCase = 'A photo of a cat' _UpperCAmelCase = tokenizer.encode( snake_case , ) # We changed the bos token self.assertEqual(snake_case , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) _UpperCAmelCase = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) _UpperCAmelCase = tokenizer.encode( snake_case , ) self.assertEqual(snake_case , [31957, 250, 1345, 9, 10, 4758] )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva lowercase = '''''' lowercase = '''''' lowercase = '''''' lowercase = 1 # (0 is vertical, 1 is horizontal) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = get_dataset(A , A ) print('Processing...' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = update_image_and_anno(A , A , A ) for index, image in enumerate(A ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _UpperCAmelCase = random_chars(32 ) _UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] _UpperCAmelCase = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(f'/{file_root}.jpg' , A , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Success {index+1}/{len(A )} with {file_name}' ) _UpperCAmelCase = [] for anno in new_annos[index]: _UpperCAmelCase = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(A ) with open(f'/{file_root}.txt' , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] for label_file in glob.glob(os.path.join(A , '*.txt' ) ): _UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(A ) as in_file: _UpperCAmelCase = in_file.readlines() _UpperCAmelCase = os.path.join(A , f'{label_name}.jpg' ) _UpperCAmelCase = [] for obj_list in obj_lists: _UpperCAmelCase = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(A ) labels.append(A ) return img_paths, labels def UpperCAmelCase ( A : list , A : list , A : int = 1 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for idx in range(len(A ) ): _UpperCAmelCase = [] _UpperCAmelCase = img_list[idx] path_list.append(A ) _UpperCAmelCase = anno_list[idx] _UpperCAmelCase = cva.imread(A ) if flip_type == 1: _UpperCAmelCase = cva.flip(A , A ) for bbox in img_annos: _UpperCAmelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _UpperCAmelCase = cva.flip(A , A ) for bbox in img_annos: _UpperCAmelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(A ) new_imgs_list.append(A ) return new_imgs_list, new_annos_lists, path_list def UpperCAmelCase ( A : int = 32 ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" _UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(A ) for _ in range(A ) ) if __name__ == "__main__": main() print('''DONE ✅''')
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_encoder_blocks' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[16, 32, 64, 128] , snake_case=[1, 4, 8, 16] , snake_case=[1, 2, 4, 8] , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=3 , snake_case=None , ) -> List[str]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = sr_ratios _UpperCAmelCase = depths _UpperCAmelCase = hidden_sizes _UpperCAmelCase = downsampling_rates _UpperCAmelCase = num_attention_heads _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = scope def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[Any]: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> str: _UpperCAmelCase = SegformerModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = _UpperCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = 
SegformerForSemanticSegmentation(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Tuple: _UpperCAmelCase = 1 _UpperCAmelCase = SegformerForSemanticSegmentation(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(snake_case ) _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = SegformerModelTester(self ) _UpperCAmelCase = SegformerConfigTester(self , config_class=snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*snake_case ) @unittest.skip('SegFormer does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' ) def lowerCamelCase_ ( self ) -> str: pass def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): 
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.attentions _UpperCAmelCase = sum(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) # verify the first attentions (first block, first layer) _UpperCAmelCase = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _UpperCAmelCase = (self.model_tester.image_size // 32) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _UpperCAmelCase = len(snake_case ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(out_len + 1 , len(snake_case ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) # verify the first attentions (first block, first layer) _UpperCAmelCase = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowerCamelCase_ ( self ) -> int: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = self.model_tester.num_encoder_blocks self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(snake_case ): continue _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) 
model.train() _UpperCAmelCase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) _UpperCAmelCase = model(**snake_case ).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> int: pass @slow def lowerCamelCase_ ( self ) -> Union[str, Any]: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = SegformerModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase_ ( self ) -> Union[str, Any]: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( snake_case ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ) _UpperCAmelCase = encoded_inputs.pixel_values.to(snake_case ) with torch.no_grad(): _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self ) -> int: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(snake_case ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ) _UpperCAmelCase = encoded_inputs.pixel_values.to(snake_case ) with torch.no_grad(): _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1E-1 ) ) @slow def lowerCamelCase_ ( self ) -> Any: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( snake_case ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ) _UpperCAmelCase = encoded_inputs.pixel_values.to(snake_case ) with torch.no_grad(): _UpperCAmelCase = 
model(snake_case ) _UpperCAmelCase = outputs.logits.detach().cpu() _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(500, 300)] ) _UpperCAmelCase = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , snake_case ) _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case ) _UpperCAmelCase = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , snake_case )
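As a side note on what the post-processing exercised above computes: a minimal sketch, assuming dummy logits in place of real SegFormer output, since the step is conceptually a bilinear resize of the logits followed by a channel-wise argmax.

import torch
import torch.nn.functional as F

# Dummy logits standing in for model output: (batch, num_labels, height, width).
logits = torch.randn(1, 150, 128, 128)
# Upsample to the requested target size, then take the most likely class per pixel.
resized = F.interpolate(logits, size=(500, 300), mode="bilinear", align_corners=False)
segmentation = resized.argmax(dim=1)[0]
print(segmentation.shape)  # torch.Size([500, 300])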
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
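A minimal sketch of what the lazy-module indirection above buys the caller, assuming transformers is installed: the submodule is only imported when an attribute is first accessed.

from transformers import RobertaConfig  # resolved on first access via _LazyModule

config = RobertaConfig()
print(config.model_type)  # 'roberta'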
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase = '''src/diffusers''' lowercase = '''.''' # This is to make sure the diffusers module imported is the one in the repo. lowercase = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase = spec.loader.load_module() def UpperCAmelCase ( A : Any , A : Tuple ): '''simple docstring''' return line.startswith(A ) or len(A ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , A ) is not None def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = object_name.split('.' ) _UpperCAmelCase = 0 # First let's find the module where our object lives. _UpperCAmelCase = parts[i] while i < len(A ) and not os.path.isfile(os.path.join(A , f'{module}.py' ) ): i += 1 if i < len(A ): _UpperCAmelCase = os.path.join(A , parts[i] ) if i >= len(A ): raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' ) with open(os.path.join(A , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: _UpperCAmelCase = f.readlines() # Now let's find the class / func in the code! _UpperCAmelCase = '' _UpperCAmelCase = 0 for name in parts[i + 1 :]: while ( line_index < len(A ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(A ): raise ValueError(f' {object_name} does not match any function or class in {module}.' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _UpperCAmelCase = line_index while line_index < len(A ) and _should_continue(lines[line_index] , A ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _UpperCAmelCase = lines[start_index:line_index] return "".join(A ) lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''') lowercase = re.compile(r'''<FILL\s+[^>]*>''') def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = code.split('\n' ) _UpperCAmelCase = 0 while idx < len(A ) and len(lines[idx] ) == 0: idx += 1 if idx < len(A ): return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0] return "" def UpperCAmelCase ( A : Any ): '''simple docstring''' _UpperCAmelCase = len(get_indent(A ) ) > 0 if has_indent: _UpperCAmelCase = f'class Bla:\n{code}' _UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=A ) _UpperCAmelCase = black.format_str(A , mode=A ) _UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(A ) return result[len('class Bla:\n' ) :] if has_indent else result def UpperCAmelCase ( A : int , A : int=False ): '''simple docstring''' with open(A , 'r' , encoding='utf-8' , newline='\n' ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [] _UpperCAmelCase = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(A ): _UpperCAmelCase = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups() _UpperCAmelCase = find_code_in_diffusers(A ) _UpperCAmelCase = get_indent(A ) _UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2 _UpperCAmelCase = theoretical_indent _UpperCAmelCase = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _UpperCAmelCase = True while line_index < len(A ) and should_continue: line_index += 1 if line_index >= len(A ): break _UpperCAmelCase = lines[line_index] _UpperCAmelCase = _should_continue(A , A ) and re.search(f'^{indent}# End copy' , A ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _UpperCAmelCase = lines[start_index:line_index] _UpperCAmelCase = ''.join(A ) # Remove any nested `Copied from` comments to avoid circular copies _UpperCAmelCase = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(A ) is None] _UpperCAmelCase = '\n'.join(A ) # Before comparing, use the `replace_pattern` on the original code. if len(A ) > 0: _UpperCAmelCase = replace_pattern.replace('with' , '' ).split(',' ) _UpperCAmelCase = [_re_replace_pattern.search(A ) for p in patterns] for pattern in patterns: if pattern is None: continue _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups() _UpperCAmelCase = re.sub(A , A , A ) if option.strip() == "all-casing": _UpperCAmelCase = re.sub(obja.lower() , obja.lower() , A ) _UpperCAmelCase = re.sub(obja.upper() , obja.upper() , A ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line _UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code ) _UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:] _UpperCAmelCase = start_index + 1 if overwrite and len(A ) > 0: # Warn the user a file has been modified. print(f'Detected changes, rewriting {filename}.' ) with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(A ) return diffs def UpperCAmelCase ( A : bool = False ): '''simple docstring''' _UpperCAmelCase = glob.glob(os.path.join(A , '**/*.py' ) , recursive=A ) _UpperCAmelCase = [] for filename in all_files: _UpperCAmelCase = is_copy_consistent(A , A ) diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs] if not overwrite and len(A ) > 0: _UpperCAmelCase = '\n'.join(A ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowercase = parser.parse_args() check_copies(args.fix_and_overwrite)
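For illustration, hypothetical comment markers in the forms the regexes above recognize; the module and class names here are made up.

# Copied from diffusers.models.attention.BasicBlock
# Copied from diffusers.models.attention.BasicBlock with BasicBlock->MyBlock
# Copied from diffusers.models.attention.BasicBlock with BasicBlock->MyBlock all-casing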
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = True , snake_case = None , snake_case = False , snake_case = None , snake_case = True , snake_case = "arrow" , **snake_case , ) -> Optional[int]: super().__init__( split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , **snake_case , ) _UpperCAmelCase = load_from_cache_file _UpperCAmelCase = file_format _UpperCAmelCase = Spark( df=snake_case , features=snake_case , cache_dir=snake_case , working_dir=snake_case , **snake_case , ) def lowerCamelCase_ ( self ) -> Optional[Any]: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) _UpperCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=snake_case , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""simple docstring""" import os def UpperCAmelCase ( A : str = "matrix.txt" ): '''simple docstring''' with open(os.path.join(os.path.dirname(A ) , A ) ) as in_file: _UpperCAmelCase = in_file.read() _UpperCAmelCase = [[int(A ) for cell in row.split(',' )] for row in data.strip().splitlines()] _UpperCAmelCase = [[0 for cell in row] for row in grid] _UpperCAmelCase = len(grid[0] ) _UpperCAmelCase = [[0 for i in range(A )] for j in range(A )] _UpperCAmelCase = grid[0][0] for i in range(1 , A ): _UpperCAmelCase = grid[0][i] + dp[0][i - 1] for i in range(1 , A ): _UpperCAmelCase = grid[i][0] + dp[i - 1][0] for i in range(1 , A ): for j in range(1 , A ): _UpperCAmelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import sys lowercase = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 1 for digit in s: product *= int(A ) return product def UpperCAmelCase ( A : str = N ): '''simple docstring''' _UpperCAmelCase = -sys.maxsize - 1 _UpperCAmelCase = n[:13] _UpperCAmelCase = 13 while cur_index < len(A ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _UpperCAmelCase = substr[1:] + n[cur_index] cur_index += 1 else: _UpperCAmelCase = max(A , str_eval(A ) ) _UpperCAmelCase = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = ['''input_features''', '''attention_mask'''] def __init__( self , snake_case=80 , snake_case=16000 , snake_case=80 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> Tuple: super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case ) _UpperCAmelCase = num_mel_bins _UpperCAmelCase = do_ceptral_normalize _UpperCAmelCase = normalize_means _UpperCAmelCase = normalize_vars _UpperCAmelCase = True def lowerCamelCase_ ( self , snake_case , ) -> np.ndarray: _UpperCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers _UpperCAmelCase = torch.from_numpy(snake_case ).unsqueeze(0 ) _UpperCAmelCase = ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowerCamelCase_ ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray: # make sure we normalize float32 arrays if normalize_means: _UpperCAmelCase = x[:input_length].mean(axis=0 ) _UpperCAmelCase = np.subtract(snake_case , snake_case ) if normalize_vars: _UpperCAmelCase = x[:input_length].std(axis=0 ) _UpperCAmelCase = np.divide(snake_case , snake_case ) if input_length < x.shape[0]: _UpperCAmelCase = padding_value # make sure array is in float32 _UpperCAmelCase = x.astype(np.floataa ) return x def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[np.ndarray]: _UpperCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(snake_case , snake_case ) ] def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) _UpperCAmelCase = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) _UpperCAmelCase = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _UpperCAmelCase = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): _UpperCAmelCase = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _UpperCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _UpperCAmelCase = [raw_speech] # extract fbank features _UpperCAmelCase = [self._extract_fbank_features(snake_case ) for waveform in raw_speech] # convert into correct format for padding _UpperCAmelCase = BatchFeature({'input_features': features} ) _UpperCAmelCase = self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format _UpperCAmelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , snake_case ): _UpperCAmelCase = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features] _UpperCAmelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: _UpperCAmelCase = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _UpperCAmelCase = ( np.array(snake_case , dtype=np.intaa ) if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD else None ) _UpperCAmelCase = self.normalize( padded_inputs['input_features'] , attention_mask=snake_case ) if return_tensors is not None: _UpperCAmelCase = padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
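A hypothetical calling pattern for a Kaldi-fbank extractor like this one; the class name is obfuscated above, so the constructor line is indicative only and kept as comments.

import numpy as np

raw_speech = np.random.randn(16000).astype(np.float32)  # 1 s of fake mono audio
# extractor = FeatureExtractor(sampling_rate=16000, num_mel_bins=80)  # name assumed
# inputs = extractor(raw_speech, sampling_rate=16000, padding=True, return_tensors="pt")
# inputs.input_features.shape  -> (batch, frames, num_mel_bins)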
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : list[float] , A : str ): '''simple docstring''' print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(A ): print(f'{i}\t\t{d}' ) def UpperCAmelCase ( A : list[dict[str, int]] , A : list[float] , A : int ): '''simple docstring''' for j in range(A ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: return True return False def UpperCAmelCase ( A : list[dict[str, int]] , A : int , A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [float('inf' )] * vertex_count _UpperCAmelCase = 0.0 for _ in range(vertex_count - 1 ): for j in range(A ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: _UpperCAmelCase = distance[u] + w _UpperCAmelCase = check_negative_cycle(A , A , A ) if negative_cycle_exists: raise Exception('Negative cycle found' ) return distance if __name__ == "__main__": import doctest doctest.testmod() lowercase = int(input('''Enter number of vertices: ''').strip()) lowercase = int(input('''Enter number of edges: ''').strip()) lowercase = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) lowercase , lowercase , lowercase = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight} lowercase = int(input('''\nEnter shortest path source:''').strip()) lowercase = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
"""simple docstring""" import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = VQModel _UpperCAmelCase = '''sample''' @property def lowerCamelCase_ ( self , snake_case=(32, 32) ) -> Optional[int]: _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) return {"sample": image} @property def lowerCamelCase_ ( self ) -> Any: return (3, 32, 32) @property def lowerCamelCase_ ( self ) -> int: return (3, 32, 32) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self ) -> Any: pass def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(snake_case ) _UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = VQModel.from_pretrained('fusing/vqgan-dummy' ) model.to(snake_case ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _UpperCAmelCase = image.to(snake_case ) with torch.no_grad(): _UpperCAmelCase = model(snake_case ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
"""simple docstring""" def UpperCAmelCase ( A : list[int] , A : list[int] ): '''simple docstring''' _UpperCAmelCase = len(A ) print('The following activities are selected:' ) # The first activity is always selected _UpperCAmelCase = 0 print(A , end=',' ) # Consider rest of the activities for j in range(A ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(A , end=',' ) _UpperCAmelCase = j if __name__ == "__main__": import doctest doctest.testmod() lowercase = [1, 3, 0, 5, 8, 5] lowercase = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( A : Tuple , A : List[Any] , A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = BertConfig.from_json_file(A ) print(f'Building PyTorch model from configuration: {config}' ) _UpperCAmelCase = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
"""simple docstring""" from math import factorial def UpperCAmelCase ( A : int = 100 ): '''simple docstring''' return sum(map(A , str(factorial(A ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = StableDiffusionSAGPipeline _UpperCAmelCase = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _UpperCAmelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _UpperCAmelCase = CLIPTextModel(snake_case ) _UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _UpperCAmelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Tuple: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) _UpperCAmelCase = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = '.' 
_UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) _UpperCAmelCase = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = '.' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) _UpperCAmelCase = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = '.' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) _UpperCAmelCase = output.images assert image.shape == (1, 512, 768, 3)
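A minimal usage sketch of the pipeline these tests exercise; the checkpoint id, guidance values, and sag_scale are lifted from the slow tests above, while the prompt is an illustrative placeholder.

import torch
from diffusers import StableDiffusionSAGPipeline

sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
generator = torch.manual_seed(0)
# sag_scale > 0 adds self-attention guidance on top of classifier-free guidance
output = sag_pipe(
    ["a photograph of an astronaut"],  # placeholder prompt, not from the tests
    generator=generator,
    guidance_scale=7.5,
    sag_scale=1.0,
    num_inference_steps=20,
    output_type="np",
)
image = output.images[0]  # (512, 512, 3) array, per the shape asserts above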
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for 
word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
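The class-conditional sampling flow from the slow tests above, as a standalone sketch; the model id and label strings are the ones the tests use.

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.to("cuda")
class_ids = pipe.get_label_ids(["vase", "umbrella"])  # ImageNet class names -> label ids
generator = torch.manual_seed(0)
images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images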
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' _UpperCAmelCase = [] for i in range(len(A ) ): _UpperCAmelCase = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours _UpperCAmelCase = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A ) - 1: neighbour_count += cells[i + 1][j] if i < len(A ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. _UpperCAmelCase = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A ) return next_generation def UpperCAmelCase ( A : list[list[int]] , A : int ): '''simple docstring''' _UpperCAmelCase = [] for _ in range(A ): # Create output image _UpperCAmelCase = Image.new('RGB' , (len(cells[0] ), len(A )) ) _UpperCAmelCase = img.load() # Save cells to image for x in range(len(A ) ): for y in range(len(cells[0] ) ): _UpperCAmelCase = 255 - cells[y][x] * 255 _UpperCAmelCase = (colour, colour, colour) # Save image images.append(A ) _UpperCAmelCase = new_generation(A ) return images if __name__ == "__main__": lowercase = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowercase = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' lowercase = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' lowercase = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' lowercase = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' lowercase = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Any: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=[1, 10, 100] , snake_case=4 , snake_case=3.0 ) -> str: if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=snake_case ) as executor: _UpperCAmelCase = [] _UpperCAmelCase = Counter() _UpperCAmelCase = 0 _UpperCAmelCase = defaultdict(snake_case ) for task_id, (candidates, test_case) in enumerate(zip(snake_case , snake_case ) ): for candidate in candidates: _UpperCAmelCase = candidate + '\n' + test_case _UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id]) _UpperCAmelCase = executor.submit(snake_case , *snake_case ) futures.append(snake_case ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(snake_case ): _UpperCAmelCase = future.result() results[result["task_id"]].append((result['completion_id'], result) ) _UpperCAmelCase , _UpperCAmelCase = [], [] for result in results.values(): result.sort() _UpperCAmelCase = [r[1]['passed'] for r in result] total.append(len(snake_case ) ) correct.append(sum(snake_case ) ) _UpperCAmelCase = np.array(snake_case ) _UpperCAmelCase = np.array(snake_case ) _UpperCAmelCase = k _UpperCAmelCase = {f'pass@{k}': estimate_pass_at_k(snake_case , snake_case , snake_case ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def UpperCAmelCase ( A : int , A : List[str] , A : str ): '''simple docstring''' def estimator(A : int , A : int , A : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(A , A ): _UpperCAmelCase = itertools.repeat(A , len(A ) ) else: assert len(A ) == len(A ) _UpperCAmelCase = iter(A ) return np.array([estimator(int(A ) , int(A ) , A ) for n, c in zip(A , A )] )
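The estimator defined at the end of the metric above computes the unbiased pass@k, 1 - C(n - c, k) / C(n, k), as a numerically stable product. A self-contained worked example (the helper name here is mine, not part of the metric's API):

import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k for n samples of which c are correct."""
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))


# 5 samples, 2 correct: pass@1 = 1 - C(3,1)/C(5,1) = 1 - 3/5 = 0.4
assert abs(pass_at_k(5, 2, 1) - 0.4) < 1e-9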
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def UpperCAmelCase ( A : Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) lowercase = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class lowercase__ ( A ): '''simple docstring''' @staticmethod def lowerCamelCase_ ( snake_case ) -> str: _UpperCAmelCase = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=snake_case , required=snake_case , help='Model\'s type.' ) train_parser.add_argument( '--tf_checkpoint' , type=snake_case , required=snake_case , help='TensorFlow checkpoint path or folder.' ) train_parser.add_argument( '--pytorch_dump_output' , type=snake_case , required=snake_case , help='Path to the PyTorch saved model output.' ) train_parser.add_argument('--config' , type=snake_case , default='' , help='Configuration file path or folder.' ) train_parser.add_argument( '--finetuning_task_name' , type=snake_case , default=snake_case , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=snake_case ) def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , *snake_case , ) -> str: _UpperCAmelCase = logging.get_logger('transformers-cli/converting' ) self._logger.info(f'Loading model {model_type}' ) _UpperCAmelCase = model_type _UpperCAmelCase = tf_checkpoint _UpperCAmelCase = pytorch_dump_output _UpperCAmelCase = config _UpperCAmelCase = finetuning_task_name def lowerCamelCase_ ( self ) -> Dict: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch 
import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) if "ckpt" in self._tf_checkpoint.lower(): _UpperCAmelCase = self._tf_checkpoint _UpperCAmelCase = '' else: _UpperCAmelCase = self._tf_checkpoint _UpperCAmelCase = '' convert_transfo_xl_checkpoint_to_pytorch( snake_case , self._config , self._pytorch_dump_output , snake_case ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
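A hedged invocation example assembled purely from the argument definitions registered above; the paths are placeholders:

transformers-cli convert --model_type bert \
    --tf_checkpoint /path/to/bert_model.ckpt \
    --config /path/to/bert_config.json \
    --pytorch_dump_output /path/to/pytorch_model.bin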
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' 
) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
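A minimal sketch of the two public entry points this module backs, assuming two small in-memory datasets; the probabilities and seed are illustrative.

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

joined = concatenate_datasets([d1, d2])  # 5 rows: all of d1, then all of d2
mixed = interleave_datasets(
    [d1, d2],
    probabilities=[0.7, 0.3],
    seed=42,
    stopping_strategy="first_exhausted",  # stop as soon as one source runs dry
)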
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class lowercase__ ( nn.Module ): '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = jnp.floataa def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , snake_case ) -> Optional[Any]: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape _UpperCAmelCase = jax.image.resize( snake_case , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) _UpperCAmelCase = self.conv(snake_case ) return hidden_states class lowercase__ ( nn.Module ): '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = jnp.floataa def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , snake_case ) -> Tuple: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _UpperCAmelCase = self.conv(snake_case ) return hidden_states class lowercase__ ( nn.Module ): '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = None _UpperCAmelCase = 0.0 _UpperCAmelCase = None _UpperCAmelCase = jnp.floataa def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _UpperCAmelCase = nn.Conv( snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = nn.Dense(snake_case , dtype=self.dtype ) _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _UpperCAmelCase = nn.Dropout(self.dropout_prob ) _UpperCAmelCase = nn.Conv( snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _UpperCAmelCase = None if use_nin_shortcut: _UpperCAmelCase = nn.Conv( snake_case , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self , snake_case , snake_case , snake_case=True ) -> Union[str, Any]: _UpperCAmelCase = hidden_states _UpperCAmelCase = self.norma(snake_case ) _UpperCAmelCase = nn.swish(snake_case ) _UpperCAmelCase = self.conva(snake_case ) _UpperCAmelCase = self.time_emb_proj(nn.swish(snake_case ) ) _UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(snake_case , 1 ) , 1 ) _UpperCAmelCase = hidden_states + temb _UpperCAmelCase = self.norma(snake_case ) _UpperCAmelCase = nn.swish(snake_case ) _UpperCAmelCase = self.dropout(snake_case , snake_case ) _UpperCAmelCase = self.conva(snake_case ) if self.conv_shortcut is not None: _UpperCAmelCase = self.conv_shortcut(snake_case ) return hidden_states + residual
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , 
do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
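The happy path from these tests as a standalone sketch, using the same tiny random checkpoint; its outputs are not meaningful, which is why the tests only check result shapes and types.

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # the test above expects [{'generated_text': ''}]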
"""simple docstring""" def UpperCAmelCase ( A : Optional[Any] , A : Optional[int] , A : Dict , A : Dict , A : Optional[Any] , A : Tuple ): '''simple docstring''' if index == r: for j in range(A ): print(data[j] , end=' ' ) print(' ' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location _UpperCAmelCase = arr[i] combination_util(A , A , A , index + 1 , A , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(A , A , A , A , A , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def UpperCAmelCase ( A : Any , A : str , A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = [0] * r # Print all combination using temporary array 'data[]' combination_util(A , A , A , 0 , A , 0 ) if __name__ == "__main__": # Driver code to check the function above lowercase = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( A : str , A : Dict , A : Tuple , A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = FunnelConfig.from_json_file(A ) print(f'Building PyTorch model from configuration: {config}' ) _UpperCAmelCase = FunnelBaseModel(A ) if base_model else FunnelModel(A ) # Load weights from tf checkpoint load_tf_weights_in_funnel(A , A , A ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.''' ) lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_squeezebert''': [ '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SqueezeBertConfig''', '''SqueezeBertOnnxConfig''', ], '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''SqueezeBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SqueezeBertForMaskedLM''', '''SqueezeBertForMultipleChoice''', '''SqueezeBertForQuestionAnswering''', '''SqueezeBertForSequenceClassification''', '''SqueezeBertForTokenClassification''', '''SqueezeBertModel''', '''SqueezeBertModule''', '''SqueezeBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
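What these tests pin down, in one framework-agnostic snippet; numpy inputs are shown, and per the tests torch, tf, and jax inputs dispatch the same way.

import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(3, 4)
assert transpose(x).shape == (4, 3)
assert reshape(x, (12,)).shape == (12,)
assert expand_dims(x, axis=1).shape == (3, 1, 4)
assert squeeze(np.random.randn(1, 3, 4)).shape == (3, 4)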
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = IFPipeline _UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} _UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowerCamelCase_ ( self ) -> int: return self._get_dummy_components() def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> int: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> Any: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def lowerCamelCase_ ( self ) -> Optional[int]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCamelCase_ ( self ) -> Any: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCamelCase_ ( self ) -> Any: self._test_save_load_local() def lowerCamelCase_ ( self ) -> str: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> Dict: # if _UpperCAmelCase = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa ) _UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=snake_case , tokenizer=snake_case ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) _UpperCAmelCase , _UpperCAmelCase = pipe_a.encode_prompt('anime turtle' , device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _UpperCAmelCase = None _UpperCAmelCase = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case , snake_case , snake_case , snake_case ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # 
img2img _UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components ) _UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case , snake_case , snake_case , snake_case ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components ) _UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case , snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]: # pipeline 1 _start_torch_memory_measurement() _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) # pipeline 2 _start_torch_memory_measurement() _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) # pipeline 2 _start_torch_memory_measurement() _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , original_image=snake_case , 
generator=snake_case , num_inference_steps=2 , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case ) _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , mask_image=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) # pipeline 2 _start_torch_memory_measurement() _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case ) _UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case ) _UpperCAmelCase = pipe_a( prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , mask_image=snake_case , original_image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) _UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case , snake_case ) def UpperCAmelCase ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
1
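A note on the pattern used in the DeepFloyd IF tests above: each stage resets the CUDA memory statistics and then reads the high-water mark to enforce a VRAM budget. A minimal self-contained sketch of that pattern (the budget value is illustrative and a CUDA device is assumed):

import torch


def measure_peak_vram(fn, budget_bytes=13 * 10**9):
    # reset the high-water mark so only allocations made inside `fn` are counted
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    peak = torch.cuda.max_memory_allocated()
    assert peak < budget_bytes, f"peak VRAM {peak} exceeded budget {budget_bytes}"
    return result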
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' return getitem, k def UpperCAmelCase ( A : int , A : Optional[Any] ): '''simple docstring''' return setitem, k, v def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' return delitem, k def UpperCAmelCase ( A : Optional[int] , A : Tuple , *A : List[Any] ): '''simple docstring''' try: return fun(A , *A ), None except Exception as e: return None, e lowercase = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) lowercase = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] lowercase = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] lowercase = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] lowercase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] lowercase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( 'operations' , ( pytest.param(_add_items , id='add items' ), pytest.param(_overwrite_items , id='overwrite items' ), pytest.param(_delete_items , id='delete items' ), pytest.param(_access_absent_items , id='access absent items' ), pytest.param(_add_with_resize_up , id='add with resize up' ), pytest.param(_add_with_resize_down , id='add with resize down' ), ) , ) def UpperCAmelCase ( A : Tuple ): '''simple docstring''' _UpperCAmelCase = HashMap(initial_block_size=4 ) _UpperCAmelCase = {} for _, (fun, *args) in enumerate(A ): _UpperCAmelCase , _UpperCAmelCase = _run_operation(A , A , *A ) _UpperCAmelCase , _UpperCAmelCase = _run_operation(A , A , *A ) assert my_res == py_res assert str(A ) == str(A ) assert set(A ) == set(A ) assert len(A ) == len(A ) assert set(my.items() ) == set(py.items() ) def UpperCAmelCase ( ): '''simple docstring''' def is_public(A : str ) -> bool: return not name.startswith('_' ) _UpperCAmelCase = {name for name in dir({} ) if is_public(A )} _UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(A )} assert dict_public_names > hash_public_names
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
24
1
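The hash map test above drives a custom HashMap and a built-in dict with identical operation tuples and asserts matching behaviour. A minimal sketch of that differential-testing pattern, shown here against two plain dicts:

from operator import delitem, getitem, setitem


def run_operation(target, fun, *args):
    # apply one (fun, *args) operation tuple, capturing the result or the raised exception
    try:
        return fun(target, *args), None
    except Exception as e:
        return None, e


operations = [
    (setitem, "key_a", "val_a"),
    (getitem, "key_a"),
    (delitem, "key_a"),
    (getitem, "key_a"),  # now absent: both sides should raise KeyError
]
reference, candidate = {}, {}  # candidate would normally be the structure under test
for fun, *args in operations:
    ref_val, ref_err = run_operation(reference, fun, *args)
    cand_val, cand_err = run_operation(candidate, fun, *args)
    assert ref_val == cand_val and type(ref_err) is type(cand_err)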
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split() _UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) ) _UpperCAmelCase = { 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>', } _UpperCAmelCase = { 'feature_size': 1, 'padding_value': 0.0, 'sampling_rate': 16000, 'return_attention_mask': False, 'do_normalize': True, } _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join(self.tmpdirname , snake_case ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case ) + '\n' ) with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case ) + '\n' ) # load decoder from hub _UpperCAmelCase = 'hf-internal-testing/ngram-beam-search-decoder' def lowerCamelCase_ ( self , **snake_case ) -> Tuple: _UpperCAmelCase = self.add_kwargs_tokens_map.copy() kwargs.update(snake_case ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCamelCase_ ( self , **snake_case ) -> int: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **snake_case ) def lowerCamelCase_ ( self , **snake_case ) -> int: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **snake_case ) def lowerCamelCase_ ( self ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , snake_case ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( 
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['xx'] ) with self.assertRaisesRegex(snake_case , 'include' ): WavaVecaProcessorWithLM( tokenizer=snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = floats_list((3, 1000) ) _UpperCAmelCase = feature_extractor(snake_case , return_tensors='np' ) _UpperCAmelCase = processor(snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = 'This is a test string' _UpperCAmelCase = processor(text=snake_case ) _UpperCAmelCase = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase_ ( self , snake_case=(2, 10, 16) , snake_case=77 ) -> Optional[Any]: np.random.seed(snake_case ) return np.random.rand(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _UpperCAmelCase = processor.decode(snake_case ) _UpperCAmelCase = decoder.decode_beams(snake_case )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('</s> <s> </s>' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['fork'], ['spawn']] ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = 
self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: _UpperCAmelCase = processor.batch_decode(snake_case ) else: with get_context(snake_case ).Pool() as pool: _UpperCAmelCase = processor.batch_decode(snake_case , snake_case ) _UpperCAmelCase = list(snake_case ) with get_context('fork' ).Pool() as p: _UpperCAmelCase = decoder.decode_beams_batch(snake_case , snake_case ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(snake_case , decoded_processor.text ) self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text ) self.assertListEqual(snake_case , decoded_processor.logit_score ) self.assertListEqual(snake_case , decoded_processor.lm_score ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = self._get_dummy_logits() _UpperCAmelCase = 15 _UpperCAmelCase = -20.0 _UpperCAmelCase = -4.0 _UpperCAmelCase = processor.batch_decode( snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , ) _UpperCAmelCase = decoded_processor_out.text _UpperCAmelCase = list(snake_case ) with get_context('fork' ).Pool() as pool: _UpperCAmelCase = decoder.decode_beams_batch( snake_case , snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , ) _UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] _UpperCAmelCase = [d[0][2] for d in decoded_decoder_out] _UpperCAmelCase = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(snake_case , snake_case ) self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , snake_case ) self.assertTrue(np.array_equal(snake_case , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , snake_case , atol=1E-3 ) ) self.assertTrue(np.array_equal(snake_case , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , snake_case , atol=1E-3 ) ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) _UpperCAmelCase = self._get_dummy_logits() _UpperCAmelCase = 2.0 _UpperCAmelCase = 5.0 _UpperCAmelCase = -20.0 _UpperCAmelCase = True _UpperCAmelCase = processor.batch_decode( snake_case , alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , ) _UpperCAmelCase = decoded_processor_out.text _UpperCAmelCase = list(snake_case ) decoder.reset_params( alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , ) with get_context('fork' ).Pool() as pool: _UpperCAmelCase = decoder.decode_beams_batch( snake_case , snake_case , ) _UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(snake_case , snake_case ) self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , 
snake_case ) _UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , snake_case ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] _UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute() _UpperCAmelCase = os.listdir(snake_case ) _UpperCAmelCase = ['alphabet.json', 'language_model'] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = snapshot_download('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(snake_case ) _UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] _UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute() _UpperCAmelCase = os.listdir(snake_case ) _UpperCAmelCase = os.listdir(snake_case ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = floats_list((3, 1000) ) _UpperCAmelCase = processor_wavaveca(snake_case , return_tensors='np' ) _UpperCAmelCase = processor_auto(snake_case , return_tensors='np' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) _UpperCAmelCase = self._get_dummy_logits() _UpperCAmelCase = processor_wavaveca.batch_decode(snake_case ) _UpperCAmelCase = processor_auto.batch_decode(snake_case ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = self.get_feature_extractor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_decoder() _UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , ) @staticmethod def lowerCamelCase_ ( snake_case , snake_case ) -> Dict: _UpperCAmelCase = [d[key] for d in offsets] return retrieved_list def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = self._get_dummy_logits()[0] _UpperCAmelCase = processor.decode(snake_case , output_word_offsets=snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('text' in outputs ) self.assertTrue('word_offsets' in outputs ) self.assertTrue(isinstance(snake_case , snake_case ) ) self.assertEqual(' 
'.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) _UpperCAmelCase = self._get_dummy_logits() _UpperCAmelCase = processor.batch_decode(snake_case , output_word_offsets=snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('text' in outputs ) self.assertTrue('word_offsets' in outputs ) self.assertTrue(isinstance(snake_case , snake_case ) ) self.assertListEqual( [' '.join(self.get_from_offsets(snake_case , 'word' ) ) for o in outputs['word_offsets']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowerCamelCase_ ( self ) -> List[str]: import torch _UpperCAmelCase = load_dataset('common_voice' , 'en' , split='train' , streaming=snake_case ) _UpperCAmelCase = ds.cast_column('audio' , datasets.Audio(sampling_rate=16000 ) ) _UpperCAmelCase = iter(snake_case ) _UpperCAmelCase = next(snake_case ) _UpperCAmelCase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' ) _UpperCAmelCase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _UpperCAmelCase = processor(sample['audio']['array'] , return_tensors='pt' ).input_values with torch.no_grad(): _UpperCAmelCase = model(snake_case ).logits.cpu().numpy() _UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=snake_case ) _UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _UpperCAmelCase = [ { 'start_time': d['start_offset'] * time_offset, 'end_time': d['end_offset'] * time_offset, 'word': d['word'], } for d in output['word_offsets'] ] _UpperCAmelCase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL' # output words self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , snake_case ) self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , output.text ) # output times _UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'start_time' ) ) _UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'end_time' ) ) # fmt: off _UpperCAmelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) _UpperCAmelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) ) self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
1
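The slow processor test above converts CTC frame offsets into seconds by multiplying with the model's input-to-logits ratio divided by the sampling rate. A minimal sketch with illustrative values (320 is assumed here as a typical wav2vec2-style downsampling factor, and the offsets are hypothetical):

# illustrative values: wav2vec2-style encoders downsample raw audio by roughly a factor of 320
inputs_to_logits_ratio = 320
sampling_rate = 16000
time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame (0.02 s here)

word_offsets = [{"word": "hello", "start_offset": 12, "end_offset": 21}]  # hypothetical decoder output
word_times = [
    {
        "word": d["word"],
        "start_time": d["start_offset"] * time_offset,
        "end_time": d["end_offset"] * time_offset,
    }
    for d in word_offsets
]
print(word_times)  # start ≈ 0.24 s, end ≈ 0.42 s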
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params lowercase = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['''memory_attention''', '''encoder_attn'''], ['''attention''', '''attn'''], ['''/''', '''.'''], ['''.LayerNorm.gamma''', '''_layer_norm.weight'''], ['''.LayerNorm.beta''', '''_layer_norm.bias'''], ['''r.layer_''', '''r.layers.'''], ['''output_proj''', '''out_proj'''], ['''ffn.dense_1.''', '''fc2.'''], ['''ffn.dense.''', '''fc1.'''], ['''ffn_layer_norm''', '''final_layer_norm'''], ['''kernel''', '''weight'''], ['''encoder_layer_norm.''', '''encoder.layer_norm.'''], ['''decoder_layer_norm.''', '''decoder.layer_norm.'''], ['''embeddings.weights''', '''shared.weight'''], ] def UpperCAmelCase ( A : Dict ): '''simple docstring''' for pegasus_name, hf_name in PATTERNS: _UpperCAmelCase = k.replace(A , A ) return k def UpperCAmelCase ( A : dict , A : dict ): '''simple docstring''' _UpperCAmelCase = DEFAULTS.copy() cfg_kwargs.update(A ) _UpperCAmelCase = PegasusConfig(**A ) _UpperCAmelCase = PegasusForConditionalGeneration(A ) _UpperCAmelCase = torch_model.model.state_dict() _UpperCAmelCase = {} for k, v in tf_weights.items(): _UpperCAmelCase = rename_state_dict_key(A ) if new_k not in sd: raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' ) if "dense" in k or "proj" in new_k: _UpperCAmelCase = v.T _UpperCAmelCase = torch.tensor(A , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}' # make sure embedding.padding_idx is respected _UpperCAmelCase = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) _UpperCAmelCase = mapping['shared.weight'] _UpperCAmelCase = mapping['shared.weight'] _UpperCAmelCase = {k: torch.zeros_like(A ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**A ) _UpperCAmelCase , _UpperCAmelCase = torch_model.model.load_state_dict(A , strict=A ) _UpperCAmelCase = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}' assert extra == [], f'no matches found for the following tf keys {extra}' return torch_model def UpperCAmelCase ( A : Optional[Any]="./ckpt/aeslc/model.ckpt-32000" ): '''simple docstring''' _UpperCAmelCase = tf.train.list_variables(A ) _UpperCAmelCase = {} _UpperCAmelCase = ['Adafactor', 'global_step'] for name, shape in tqdm(A , desc='converting tf checkpoint to dict' ): _UpperCAmelCase = any(pat in name for pat in ignore_name ) if skip_key: continue _UpperCAmelCase = tf.train.load_variable(A , A ) _UpperCAmelCase = array return tf_weights def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = Path(A ).parent.name _UpperCAmelCase = task_specific_params[f'summarization_{dataset}']['max_position_embeddings'] _UpperCAmelCase = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=A ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(A ) # convert model _UpperCAmelCase = get_tf_weights_as_numpy(A ) _UpperCAmelCase = task_specific_params[f'summarization_{dataset}'] if 
dataset == "large": _UpperCAmelCase = task_specific_params _UpperCAmelCase = convert_pegasus(A , A ) torch_model.save_pretrained(A ) _UpperCAmelCase = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(A , Path(A ) / 'pytorch_model.bin' ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase = parser.parse_args() if args.save_dir is None: lowercase = Path(args.tf_ckpt_path).parent.name lowercase = os.path.join('''pegasus''', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
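The Pegasus conversion script above maps TF variable names to PyTorch state-dict keys by applying ordered substring replacements. A minimal sketch using a subset of its patterns and a hypothetical TF-style key:

PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["kernel", "weight"],
]


def rename_state_dict_key(k):
    # apply each (tf_substring, torch_substring) replacement in order; order matters,
    # e.g. "memory_attention" must be handled before the generic "attention" rule
    for tf_name, torch_name in PATTERNS:
        k = k.replace(tf_name, torch_name)
    return k


# hypothetical key, for illustration only
print(rename_state_dict_key("decoder/memory_attention/output_proj/kernel"))
# decoder.encoder_attn.output_proj.weight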
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = GPTSanJapaneseTokenizer _UpperCAmelCase = False _UpperCAmelCase = {'''do_clean_text''': False, '''add_prefix_space''': False} def lowerCamelCase_ ( self ) -> List[str]: super().setUp() # fmt: off _UpperCAmelCase = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on _UpperCAmelCase = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(snake_case ) ) def lowerCamelCase_ ( self , **snake_case ) -> Tuple: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: _UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、㔺界。😀' _UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def lowerCamelCase_ ( self , snake_case ) -> List[str]: _UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) return text, ids def lowerCamelCase_ ( self ) -> int: pass # TODO add if relevant def lowerCamelCase_ ( self ) -> Dict: pass # TODO add if relevant def lowerCamelCase_ ( self ) -> List[Any]: pass # TODO add if relevant def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = self.get_tokenizer() # Testing tokenization _UpperCAmelCase = 'こんにちは、世界。 こんばんは、㔺界。' _UpperCAmelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] _UpperCAmelCase = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) # Testing conversion to ids without special tokens _UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual(snake_case , snake_case ) # Testing conversion to ids with special tokens _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.get_tokenizer() # Testing tokenization _UpperCAmelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' _UpperCAmelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。' _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = tokenizer.decode(snake_case ) self.assertEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = 
self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization _UpperCAmelCase = 'こんにちは、世界。' _UpperCAmelCase = 'こんばんは、㔺界。😀' _UpperCAmelCase = 'こんにちは、世界。こんばんは、世界。😀' _UpperCAmelCase = tokenizer.encode(prefix_text + input_text ) _UpperCAmelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text ) _UpperCAmelCase = tokenizer.encode(snake_case , prefix_text=snake_case ) _UpperCAmelCase = tokenizer.decode(snake_case ) _UpperCAmelCase = tokenizer.decode(snake_case ) _UpperCAmelCase = tokenizer.decode(snake_case ) self.assertEqual(snake_case , snake_case ) self.assertEqual(snake_case , snake_case ) self.assertEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization _UpperCAmelCase = 'こんにちは、世界。' _UpperCAmelCase = 'こんばんは、㔺界。😀' _UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2 _UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2 _UpperCAmelCase = [1] + [0] * (len_prefix + len_text + 1) _UpperCAmelCase = [1] * (len_prefix + len_text + 1) + [0] _UpperCAmelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1) _UpperCAmelCase = tokenizer(prefix_text + input_text ).token_type_ids _UpperCAmelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids _UpperCAmelCase = tokenizer(snake_case , prefix_text=snake_case ).token_type_ids self.assertListEqual(snake_case , snake_case ) self.assertListEqual(snake_case , snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) _UpperCAmelCase = tokenizer.encode('あンいワ' ) _UpperCAmelCase = tokenizer.encode('' , prefix_text='あンいワ' ) _UpperCAmelCase = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) ) self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) ) self.assertNotEqual(snake_case , snake_case ) self.assertNotEqual(snake_case , snake_case ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) _UpperCAmelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] _UpperCAmelCase = tokenizer(snake_case , padding=snake_case ) _UpperCAmelCase = tokenizer.batch_encode_plus(snake_case , padding=snake_case ) # fmt: off _UpperCAmelCase = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] _UpperCAmelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] _UpperCAmelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , snake_case ) self.assertListEqual(x_token.token_type_ids , snake_case ) self.assertListEqual(x_token.attention_mask , snake_case ) self.assertListEqual(x_token_a.input_ids , snake_case ) self.assertListEqual(x_token_a.token_type_ids , snake_case ) self.assertListEqual(x_token_a.attention_mask , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCamelCase_ ( self ) -> List[str]: # tokenizer has no padding token pass
24
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
1
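The token-counting script above accumulates per-token occurrence counts with a Counter and flattens them into a dense list indexed by token id. A minimal sketch of the same idea on a toy corpus:

from collections import Counter

vocab_size = 8
data = [[0, 1, 1, 3], [3, 3, 7]]  # toy binarized dataset: one list of token ids per sequence

counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)

# dense counts indexed by token id; ids never seen stay at 0
counts = [0] * vocab_size
for token_id, count in counter.items():
    counts[token_id] = count

print(counts)  # [1, 2, 0, 3, 0, 0, 0, 1]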
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
24
1
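The divisibility checks in the Project Euler solution above encode the substring property from problem 43; the problem statement's known example, 1406357289, satisfies all of them. A self-contained sketch:

def is_substring_divisible(num):
    # d2d3d4 divisible by 2, d3d4d5 by 3, d4d5d6 by 5, then sliding triples by 7, 11, 13, 17
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    for i, test in enumerate([7, 11, 13, 17]):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


print(is_substring_divisible(tuple(int(c) for c in "1406357289")))  # True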
"""simple docstring""" lowercase = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' with open(os.path.dirname(A ) + '/grid.txt' ) as f: _UpperCAmelCase = [] # noqa: E741 for _ in range(20 ): l.append([int(A ) for x in f.readline().split()] ) _UpperCAmelCase = 0 # right for i in range(20 ): for j in range(17 ): _UpperCAmelCase = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: _UpperCAmelCase = temp # down for i in range(17 ): for j in range(20 ): _UpperCAmelCase = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: _UpperCAmelCase = temp # diagonal 1 for i in range(17 ): for j in range(17 ): _UpperCAmelCase = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: _UpperCAmelCase = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): _UpperCAmelCase = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: _UpperCAmelCase = temp return maximum if __name__ == "__main__": print(solution())
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" lowercase = range(2, 20 + 1) lowercase = [10**k for k in range(ks[-1] + 1)] lowercase = {} def UpperCAmelCase ( A : int , A : Tuple , A : List[str] , A : Tuple ): '''simple docstring''' _UpperCAmelCase = sum(a_i[j] for j in range(A , len(A ) ) ) _UpperCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) ) _UpperCAmelCase , _UpperCAmelCase = 0, 0 _UpperCAmelCase = n - i _UpperCAmelCase = memo.get(A ) if sub_memo is not None: _UpperCAmelCase = sub_memo.get(A ) if jumps is not None and len(A ) > 0: # find and make the largest jump without going over _UpperCAmelCase = -1 for _k in range(len(A ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _UpperCAmelCase = _k break if max_jump >= 0: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = jumps[max_jump] # since the difference between jumps is cached, add c _UpperCAmelCase = diff + c for j in range(min(A , len(A ) ) ): _UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 ) if new_c > 0: add(A , A , A ) else: _UpperCAmelCase = [] else: _UpperCAmelCase = {c: []} _UpperCAmelCase = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _UpperCAmelCase , _UpperCAmelCase = next_term(A , k - 1 , i + dn , A ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _UpperCAmelCase , _UpperCAmelCase = compute(A , A , i + dn , A ) diff += _diff dn += terms_jumped _UpperCAmelCase = sub_memo[c] # keep jumps sorted by # of terms skipped _UpperCAmelCase = 0 while j < len(A ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(A , (diff, dn, k) ) return (diff, dn) def UpperCAmelCase ( A : Optional[int] , A : Any , A : Tuple , A : Optional[int] ): '''simple docstring''' if i >= n: return 0, i if k > len(A ): a_i.extend([0 for _ in range(k - len(A ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _UpperCAmelCase = i _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 0, 0 for j in range(len(A ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _UpperCAmelCase = ds_c + ds_b diff += addend _UpperCAmelCase = 0 for j in range(A ): _UpperCAmelCase = a_i[j] + addend _UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(A , A , A ) return diff, i - start_i def UpperCAmelCase ( A : Tuple , A : Dict , A : List[Any] ): '''simple docstring''' for j in range(A , len(A ) ): _UpperCAmelCase = digits[j] + addend if s >= 10: _UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 ) _UpperCAmelCase = addend // 10 + quotient else: _UpperCAmelCase = s _UpperCAmelCase = addend // 10 if addend == 0: break while addend > 0: _UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 ) digits.append(A ) def UpperCAmelCase ( A : int = 10**15 ): '''simple docstring''' _UpperCAmelCase = [1] _UpperCAmelCase = 1 _UpperCAmelCase = 0 while True: _UpperCAmelCase , _UpperCAmelCase = next_term(A , 20 , i + dn , A ) dn += terms_jumped if dn == n - i: break _UpperCAmelCase = 0 for j in range(len(A ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" def UpperCAmelCase ( A : list[list[int]] , A : int , A : int , A : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(A ), len(grid[0] ) if ( min(A , A ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(A , row + 1 , A , A ) count += depth_first_search(A , row - 1 , A , A ) count += depth_first_search(A , A , col + 1 , A ) count += depth_first_search(A , A , col - 1 , A ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase = logging.getLogger(__name__) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=A, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=A, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=1_28, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=A, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) _UpperCAmelCase = import_module('tasks' ) try: _UpperCAmelCase = getattr(A , model_args.task_type ) _UpperCAmelCase = token_classification_task_clazz() except AttributeError: raise ValueError( f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
' f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _UpperCAmelCase = token_classification_task.get_labels(data_args.labels ) _UpperCAmelCase = dict(enumerate(A ) ) _UpperCAmelCase = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _UpperCAmelCase = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: _UpperCAmelCase = np.argmax(A , axis=2 ) _UpperCAmelCase , _UpperCAmelCase = preds.shape _UpperCAmelCase = [[] for _ in range(A )] _UpperCAmelCase = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: _UpperCAmelCase , _UpperCAmelCase = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator _UpperCAmelCase = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , 
data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: _UpperCAmelCase = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(A ) _UpperCAmelCase , _UpperCAmelCase = align_predictions(A , A ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions _UpperCAmelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def UpperCAmelCase ( A : str ): '''simple docstring''' main() if __name__ == "__main__": main()
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" def UpperCAmelCase ( A : List[str] ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = { '^': 3, '*': 2, '/': 2, '%': 2, '+': 1, '-': 1, } # Priority of each operator _UpperCAmelCase = len(A ) if (len(A ) > 7) else 7 # Print table header for output print( 'Symbol'.center(8 ) , 'Stack'.center(A ) , 'Postfix'.center(A ) , sep=' | ' , ) print('-' * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(A ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(A ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(A ) == 0: stack.append(A ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(A ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(A ) # push x to stack print( x.center(8 ) , (''.join(A )).ljust(A ) , (''.join(A )).ljust(A ) , sep=' | ' , ) # Output in tabular format while len(A ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( ' '.center(8 ) , (''.join(A )).ljust(A ) , (''.join(A )).ljust(A ) , sep=' | ' , ) # Output in tabular format return "".join(A ) # return Postfix as str def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = list(infix[::-1] ) # reverse the infix equation for i in range(len(A ) ): if infix[i] == "(": _UpperCAmelCase = ')' # change "(" to ")" elif infix[i] == ")": _UpperCAmelCase = '(' # change ")" to "(" return (infix_2_postfix(''.join(A ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": lowercase = input('''\nEnter an Infix Equation = ''') # Input an Infix equation lowercase = ''''''.join(Infix.split()) # Remove spaces from the input print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' _UpperCAmelCase = botoa.client('iam' ) _UpperCAmelCase = { 'Version': '2012-10-17', 'Statement': [ {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A , AssumeRolePolicyDocument=json.dumps(A , indent=2 ) ) _UpperCAmelCase = { 'Version': '2012-10-17', 'Statement': [ { 'Effect': 'Allow', 'Action': [ 'sagemaker:*', 'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage', 'ecr:BatchCheckLayerAvailability', 'ecr:GetAuthorizationToken', 'cloudwatch:PutMetricData', 'cloudwatch:GetMetricData', 'cloudwatch:GetMetricStatistics', 'cloudwatch:ListMetrics', 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:DescribeLogStreams', 'logs:PutLogEvents', 'logs:GetLogEvents', 's3:CreateBucket', 's3:ListBucket', 's3:GetBucketLocation', 's3:GetObject', 's3:PutObject', ], 'Resource': '*', } ], } # attach policy to role iam_client.put_role_policy( RoleName=A , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(A , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'role {role_name} already exists. Using existing one' ) def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = botoa.client('iam' ) return iam_client.get_role(RoleName=A )["Role"]["Arn"] def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = _ask_options( 'How do you want to authorize?' 
, ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , A , ) _UpperCAmelCase = None if credentials_configuration == 0: _UpperCAmelCase = _ask_field('Enter your AWS Profile name: [default] ' , default='default' ) _UpperCAmelCase = aws_profile else: print( 'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,' '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' ) _UpperCAmelCase = _ask_field('AWS Access Key ID: ' ) _UpperCAmelCase = aws_access_key_id _UpperCAmelCase = _ask_field('AWS Secret Access Key: ' ) _UpperCAmelCase = aws_secret_access_key _UpperCAmelCase = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' ) _UpperCAmelCase = aws_region _UpperCAmelCase = _ask_options( 'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , A , ) if role_management == 0: _UpperCAmelCase = _ask_field('Enter your IAM role name: ' ) else: _UpperCAmelCase = 'accelerate_sagemaker_execution_role' print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' ) _create_iam_role_for_sagemaker(A ) _UpperCAmelCase = _ask_field( 'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) _UpperCAmelCase = None if is_custom_docker_image: _UpperCAmelCase = _ask_field('Enter your Docker image: ' , lambda A : str(A ).lower() ) _UpperCAmelCase = _ask_field( 'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) _UpperCAmelCase = None if is_sagemaker_inputs_enabled: _UpperCAmelCase = _ask_field( 'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda A : str(A ).lower() , ) _UpperCAmelCase = _ask_field( 'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) _UpperCAmelCase = None if is_sagemaker_metrics_enabled: _UpperCAmelCase = _ask_field( 'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda A : str(A ).lower() , ) _UpperCAmelCase = _ask_options( 'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , ) _UpperCAmelCase = {} _UpperCAmelCase = _ask_field( 'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) if use_dynamo: _UpperCAmelCase = 'dynamo_' _UpperCAmelCase = _ask_options( 'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) _UpperCAmelCase = _ask_field( 'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) if use_custom_options: _UpperCAmelCase = _ask_options( 'Which mode do you want to use?' , A , lambda A : TORCH_DYNAMO_MODES[int(A )] , default='default' , ) _UpperCAmelCase = _ask_field( 'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) _UpperCAmelCase = _ask_field( 'Do you want to enable dynamic shape tracing? 
[yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , ) _UpperCAmelCase = 'Which EC2 instance type you want to use for your training?' if distributed_type != SageMakerDistributedType.NO: _UpperCAmelCase = _ask_options( A , A , lambda A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" _UpperCAmelCase = _ask_field(A , lambda A : str(A ).lower() , default='ml.p3.2xlarge' ) _UpperCAmelCase = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): _UpperCAmelCase = _ask_field( 'How many machines do you want use? [1]: ' , A , default=1 , ) _UpperCAmelCase = _ask_options( 'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( 'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' ) return SageMakerConfig( image_uri=A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A , use_cpu=A , dynamo_config=A , eca_instance_type=A , profile=A , region=A , iam_role_name=A , mixed_precision=A , num_machines=A , sagemaker_inputs_file=A , sagemaker_metrics_file=A , )
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for 
word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
1
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowercase = logging.get_logger(__name__) @add_end_docstrings( A, R''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''', ) class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> np.ndarray: if self.framework == "tf": _UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case ) else: raise ValueError('Unsupported framework' ) return masked_index def lowerCamelCase_ ( self , snake_case ) -> np.ndarray: _UpperCAmelCase = self.get_masked_index(snake_case ) _UpperCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , ) def lowerCamelCase_ ( self , snake_case ) -> List[str]: if isinstance(snake_case , snake_case ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case=None , **snake_case ) -> Dict[str, GenericTensor]: if return_tensors is None: _UpperCAmelCase = self.framework _UpperCAmelCase = self.tokenizer(snake_case , return_tensors=snake_case ) self.ensure_exactly_one_mask_token(snake_case ) return model_inputs def lowerCamelCase_ ( self , snake_case ) -> List[Any]: _UpperCAmelCase = self.model(**snake_case ) _UpperCAmelCase = model_inputs['input_ids'] return model_outputs def lowerCamelCase_ ( self , snake_case , snake_case=5 , snake_case=None ) -> Optional[Any]: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _UpperCAmelCase = target_ids.shape[0] _UpperCAmelCase = model_outputs['input_ids'][0] _UpperCAmelCase = model_outputs['logits'] if self.framework == "tf": _UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _UpperCAmelCase = outputs.numpy() _UpperCAmelCase = outputs[0, masked_index, :] _UpperCAmelCase = stable_softmax(snake_case , axis=-1 ) if target_ids is not None: _UpperCAmelCase = tf.gather_nd(tf.squeeze(snake_case , 0 ) , target_ids.reshape(-1 , 1 ) ) _UpperCAmelCase = tf.expand_dims(snake_case , 0 ) _UpperCAmelCase = tf.math.top_k(snake_case , k=snake_case ) _UpperCAmelCase , _UpperCAmelCase = topk.values.numpy(), topk.indices.numpy() else: _UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _UpperCAmelCase = outputs[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: _UpperCAmelCase = probs[..., target_ids] _UpperCAmelCase , _UpperCAmelCase = 
probs.topk(snake_case ) _UpperCAmelCase = [] _UpperCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _UpperCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place _UpperCAmelCase = input_ids.numpy().copy() if target_ids is not None: _UpperCAmelCase = target_ids[p].tolist() _UpperCAmelCase = p # Filter padding out: _UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _UpperCAmelCase = self.tokenizer.decode(snake_case , skip_special_tokens=snake_case ) _UpperCAmelCase = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(snake_case ) result.append(snake_case ) if single_mask: return result[0] return result def lowerCamelCase_ ( self , snake_case , snake_case=None ) -> Dict: if isinstance(snake_case , snake_case ): _UpperCAmelCase = [targets] try: _UpperCAmelCase = self.tokenizer.get_vocab() except Exception: _UpperCAmelCase = {} _UpperCAmelCase = [] for target in targets: _UpperCAmelCase = vocab.get(snake_case , snake_case ) if id_ is None: _UpperCAmelCase = self.tokenizer( snake_case , add_special_tokens=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , max_length=1 , truncation=snake_case , )['input_ids'] if len(snake_case ) == 0: logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' 'We cannot replace it with anything meaningful, ignoring it' ) continue _UpperCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' ) target_ids.append(id_ ) _UpperCAmelCase = list(set(snake_case ) ) if len(snake_case ) == 0: raise ValueError('At least one target must be provided when passed.' ) _UpperCAmelCase = np.array(snake_case ) return target_ids def lowerCamelCase_ ( self , snake_case=None , snake_case=None ) -> Optional[int]: _UpperCAmelCase = {} if targets is not None: _UpperCAmelCase = self.get_target_ids(snake_case , snake_case ) _UpperCAmelCase = target_ids if top_k is not None: _UpperCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self , snake_case , *snake_case , **snake_case ) -> int: _UpperCAmelCase = super().__call__(snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
24
1
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = ['''vqvae'''] def __init__( self , snake_case , snake_case , snake_case , snake_case , ) -> List[Any]: super().__init__() self.register_modules(unet=snake_case , scheduler=snake_case , mel=snake_case , vqvae=snake_case ) def lowerCamelCase_ ( self ) -> int: return 50 if isinstance(self.scheduler , snake_case ) else 1000 @torch.no_grad() def __call__( self , snake_case = 1 , snake_case = None , snake_case = None , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = 0 , snake_case = None , snake_case = None , snake_case=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: _UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(snake_case ) _UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: _UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: _UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=snake_case , device=self.device , ) _UpperCAmelCase = noise _UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(snake_case , snake_case ) _UpperCAmelCase = self.mel.audio_slice_to_image(snake_case ) _UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) _UpperCAmelCase = (input_image / 255) * 2 - 1 _UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: _UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(snake_case , 0 ) ).latent_dist.sample( generator=snake_case )[0] _UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: _UpperCAmelCase = self.scheduler.add_noise(snake_case , snake_case , self.scheduler.timesteps[start_step - 1] ) _UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) _UpperCAmelCase = int(mask_start_secs * pixels_per_second ) _UpperCAmelCase = int(mask_end_secs * pixels_per_second ) _UpperCAmelCase = self.scheduler.add_noise(snake_case , snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , snake_case ): _UpperCAmelCase = self.unet(snake_case , snake_case , snake_case )['sample'] else: _UpperCAmelCase = self.unet(snake_case , snake_case )['sample'] if isinstance(self.scheduler , snake_case ): _UpperCAmelCase = self.scheduler.step( model_output=snake_case , timestep=snake_case , sample=snake_case , eta=snake_case , generator=snake_case , )['prev_sample'] else: _UpperCAmelCase = self.scheduler.step( model_output=snake_case , timestep=snake_case , sample=snake_case , generator=snake_case , )['prev_sample'] if mask is not 
None: if mask_start > 0: _UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: _UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance _UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images _UpperCAmelCase = self.vqvae.decode(snake_case )['sample'] _UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() _UpperCAmelCase = (images * 255).round().astype('uint8' ) _UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(snake_case , mode='RGB' ).convert('L' ) for _ in images) ) _UpperCAmelCase = [self.mel.image_to_audio(snake_case ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(snake_case ) ) @torch.no_grad() def lowerCamelCase_ ( self , snake_case , snake_case = 50 ) -> np.ndarray: assert isinstance(self.scheduler , snake_case ) self.scheduler.set_timesteps(snake_case ) _UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) _UpperCAmelCase = (sample / 255) * 2 - 1 _UpperCAmelCase = torch.Tensor(snake_case ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): _UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps _UpperCAmelCase = self.scheduler.alphas_cumprod[t] _UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) _UpperCAmelCase = 1 - alpha_prod_t _UpperCAmelCase = self.unet(snake_case , snake_case )['sample'] _UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output _UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) _UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def lowerCamelCase_ ( snake_case , snake_case , snake_case ) -> torch.Tensor: _UpperCAmelCase = acos(torch.dot(torch.flatten(snake_case ) , torch.flatten(snake_case ) ) / torch.norm(snake_case ) / torch.norm(snake_case ) ) return sin((1 - alpha) * theta ) * xa / sin(snake_case ) + sin(alpha * theta ) * xa / sin(snake_case )
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
1
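The audio-diffusion pipeline above closes with a static spherical linear interpolation (slerp) helper for blending two noise tensors. A minimal standalone sketch of the same math, assuming equally shaped, non-parallel PyTorch tensors; the function name `slerp` is illustrative:

```python
from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    """Spherical interpolation: alpha=0 gives x0, alpha=1 gives x1."""
    # Angle between the two flattened vectors; the clamp guards against
    # floating-point drift pushing the cosine just outside [-1, 1].
    cos_theta = torch.dot(torch.flatten(x0), torch.flatten(x1)) / (
        torch.norm(x0) * torch.norm(x1)
    )
    theta = acos(max(-1.0, min(1.0, float(cos_theta))))
    # Degenerate when theta == 0 (parallel inputs); real callers may fall
    # back to plain linear interpolation in that case.
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)


if __name__ == "__main__":
    a, b = torch.randn(2, 8), torch.randn(2, 8)
    print(slerp(a, b, 0.5).shape)  # torch.Size([2, 8])
```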
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''nat''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=4 , snake_case=3 , snake_case=64 , snake_case=[3, 4, 6, 5] , snake_case=[2, 4, 8, 16] , snake_case=7 , snake_case=3.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=0.02 , snake_case=1E-5 , snake_case=0.0 , snake_case=None , snake_case=None , **snake_case , ) -> List[str]: super().__init__(**snake_case ) _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = kernel_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
24
1
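The Nat configuration above sets `hidden_size` to `embed_dim * 2 ** (num_stages - 1)` because the channel dimension doubles at every stage. A small sketch of that progression, assuming the config defaults; `nat_stage_dims` is an illustrative helper name:

```python
def nat_stage_dims(embed_dim: int, depths: list[int]) -> list[int]:
    # One entry per stage; each stage doubles the channel dimension,
    # so the last entry matches the config's derived hidden_size.
    return [embed_dim * 2**i for i in range(len(depths))]


print(nat_stage_dims(64, [3, 4, 6, 5]))  # [64, 128, 256, 512]
```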
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline lowercase = { '''n_samples''': 64, '''horizon''': 32, '''num_inference_steps''': 20, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": lowercase = '''hopper-medium-v2''' lowercase = gym.make(env_name) lowercase = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) lowercase = env.reset() lowercase = 0 lowercase = 0 lowercase = 10_00 lowercase = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy lowercase = pipeline(obs, planning_horizon=32) # execute action in environment lowercase , lowercase , lowercase , lowercase = env.step(denorm_actions) lowercase = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' F''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) lowercase = next_observation except KeyboardInterrupt: pass print(F'''Total reward: {total_reward}''')
24
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' 
) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
24
1
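The `interleave_datasets` entry point above validates that all sources share one type and then dispatches to the map-style or iterable implementation. A short usage sketch with toy data; the column values are illustrative:

```python
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

# "first_exhausted" (the default) stops once any source runs dry;
# "all_exhausted" keeps sampling until every source has been fully seen.
mixed = interleave_datasets(
    [d1, d2],
    probabilities=[0.5, 0.5],
    seed=42,
    stopping_strategy="all_exhausted",
)
print(mixed["text"])
```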
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowercase = logging.get_logger(__name__) lowercase = { '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''', # See all Marian models at https://huggingface.co/models?filter=marian } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''marian''' _UpperCAmelCase = ['''past_key_values'''] _UpperCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , snake_case=58101 , snake_case=None , snake_case=1024 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=1024 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=58100 , snake_case=False , snake_case=58100 , snake_case=0 , snake_case=0 , snake_case=True , **snake_case , ) -> Dict: _UpperCAmelCase = vocab_size _UpperCAmelCase = decoder_vocab_size or vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = d_model _UpperCAmelCase = encoder_ffn_dim _UpperCAmelCase = encoder_layers _UpperCAmelCase = encoder_attention_heads _UpperCAmelCase = decoder_ffn_dim _UpperCAmelCase = decoder_layers _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = activation_function _UpperCAmelCase = init_std _UpperCAmelCase = encoder_layerdrop _UpperCAmelCase = decoder_layerdrop _UpperCAmelCase = use_cache _UpperCAmelCase = encoder_layers _UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True _UpperCAmelCase = share_encoder_decoder_embeddings super().__init__( pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , forced_eos_token_id=snake_case , **snake_case , ) class lowercase__ ( A ): '''simple docstring''' @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _UpperCAmelCase = {0: 'batch'} _UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'} _UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(snake_case , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_UpperCAmelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _UpperCAmelCase , _UpperCAmelCase = self.num_layers for i in range(snake_case ): _UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'} _UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'} else: _UpperCAmelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase = super().outputs else: _UpperCAmelCase = super(snake_case , self ).outputs if self.use_past: _UpperCAmelCase , _UpperCAmelCase = self.num_layers for i in range(snake_case ): _UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'} _UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]: _UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( snake_case , snake_case , snake_case , snake_case , snake_case ) # Generate decoder inputs _UpperCAmelCase = seq_length if not self.use_past else 1 _UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( snake_case , snake_case , snake_case , snake_case , snake_case ) _UpperCAmelCase = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _UpperCAmelCase = dict(**snake_case , **snake_case ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape _UpperCAmelCase = common_inputs['decoder_input_ids'].shape[1] _UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads _UpperCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCAmelCase = decoder_seq_length + 3 _UpperCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _UpperCAmelCase = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(snake_case , snake_case )] , dim=1 ) _UpperCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _UpperCAmelCase , _UpperCAmelCase = self.num_layers _UpperCAmelCase = min(snake_case , snake_case ) _UpperCAmelCase = max(snake_case , snake_case ) - min_num_layers _UpperCAmelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(snake_case ): common_inputs["past_key_values"].append( ( torch.zeros(snake_case ), torch.zeros(snake_case ), torch.zeros(snake_case ), torch.zeros(snake_case ), ) ) # TODO: test this. 
_UpperCAmelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(snake_case , snake_case ): common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) ) return common_inputs def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]: _UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( snake_case , snake_case , snake_case , snake_case , snake_case ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values _UpperCAmelCase = seqlen + 2 _UpperCAmelCase , _UpperCAmelCase = self.num_layers _UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads _UpperCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCAmelCase = common_inputs['attention_mask'].dtype _UpperCAmelCase = torch.cat( [common_inputs['attention_mask'], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 ) _UpperCAmelCase = [ (torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case ) ] return common_inputs def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = tokenizer.num_special_tokens_to_add(snake_case ) _UpperCAmelCase = compute_effective_axis_dimension( snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size _UpperCAmelCase = dict(tokenizer(snake_case , return_tensors=snake_case ) ) return common_inputs def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case ) else: _UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm( snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case ) return common_inputs def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> str: if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase = super()._flatten_past_key_values_(snake_case , snake_case , snake_case , snake_case ) else: _UpperCAmelCase = super(snake_case , self )._flatten_past_key_values_( snake_case , snake_case , snake_case , snake_case ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , 
do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
24
1
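The tests above exercise `TextaTextGenerationPipeline` through the `pipeline` factory. A brief usage sketch against the same tiny checkpoint the tests use:

```python
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")

# Greedy decoding (do_sample=False) keeps outputs reproducible, which is
# why the tests above pin it before asserting on generated_text.
print(generator("Something there", do_sample=False))

# Beam search can return several candidates per input.
outputs = generator("Something there", num_beams=3, num_return_sequences=3)
print(len(outputs))  # 3
```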
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowercase = (7_20, 12_80) # Height, Width lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it. lowercase = 1 / 1_00 lowercase = '''''' lowercase = '''''' lowercase = '''''' lowercase = 2_50 def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = get_dataset(A , A ) for index in range(A ): _UpperCAmelCase = random.sample(range(len(A ) ) , 4 ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = update_image_and_anno( A , A , A , A , A , filter_scale=A , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _UpperCAmelCase = random_chars(32 ) _UpperCAmelCase = path.split(os.sep )[-1].rsplit('.' , 1 )[0] _UpperCAmelCase = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , A , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) _UpperCAmelCase = [] for anno in new_annos: _UpperCAmelCase = anno[3] - anno[1] _UpperCAmelCase = anno[4] - anno[2] _UpperCAmelCase = anno[1] + width / 2 _UpperCAmelCase = anno[2] + height / 2 _UpperCAmelCase = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(A ) with open(f'{file_root}.txt' , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] for label_file in glob.glob(os.path.join(A , '*.txt' ) ): _UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(A ) as in_file: _UpperCAmelCase = in_file.readlines() _UpperCAmelCase = os.path.join(A , f'{label_name}.jpg' ) _UpperCAmelCase = [] for obj_list in obj_lists: _UpperCAmelCase = obj_list.rstrip('\n' ).split(' ' ) _UpperCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 _UpperCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 _UpperCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 _UpperCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(A ) labels.append(A ) return img_paths, labels def UpperCAmelCase ( A : list , A : list , A : list[int] , A : tuple[int, int] , A : tuple[float, float] , A : float = 0.0 , ): '''simple docstring''' _UpperCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _UpperCAmelCase = int(scale_x * output_size[1] ) _UpperCAmelCase = int(scale_y * output_size[0] ) _UpperCAmelCase = [] _UpperCAmelCase = [] for i, index in enumerate(A ): _UpperCAmelCase = all_img_list[index] path_list.append(A ) _UpperCAmelCase = all_annos[index] _UpperCAmelCase = cva.imread(A ) if i == 0: # top-left _UpperCAmelCase = cva.resize(A , (divid_point_x, divid_point_y) ) _UpperCAmelCase = img for bbox in img_annos: _UpperCAmelCase = bbox[1] * scale_x _UpperCAmelCase = bbox[2] * scale_y _UpperCAmelCase = bbox[3] * scale_x _UpperCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _UpperCAmelCase = cva.resize(A , (output_size[1] - divid_point_x, divid_point_y) ) _UpperCAmelCase = img for bbox in img_annos: _UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x) _UpperCAmelCase = bbox[2] * scale_y _UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x) 
_UpperCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _UpperCAmelCase = cva.resize(A , (divid_point_x, output_size[0] - divid_point_y) ) _UpperCAmelCase = img for bbox in img_annos: _UpperCAmelCase = bbox[1] * scale_x _UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y) _UpperCAmelCase = bbox[3] * scale_x _UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _UpperCAmelCase = cva.resize( A , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _UpperCAmelCase = img for bbox in img_annos: _UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x) _UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y) _UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x) _UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _UpperCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def UpperCAmelCase ( A : int ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" _UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(A ) for _ in range(A ) ) if __name__ == "__main__": main() print('''DONE ✅''')
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
24
1
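The partition function above fills a table where `memo[n][k]` counts the partitions of `n` into parts of size at most `k + 1`. A readable restatement of the same recurrence; `partition` matches the name used in the snippet's `__main__` block:

```python
def partition(m: int) -> int:
    # memo[n][k]: partitions of n into parts of size at most k + 1
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1  # exactly one partition into all ones
    for n in range(m + 1):
        for k in range(1, m):
            # Either no part of size k + 1 is used ...
            memo[n][k] += memo[n][k - 1]
            # ... or one part of size k + 1 is peeled off.
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


print(partition(5))  # 7: there are seven partitions of 5
```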
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
1
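The snippet above enumerates all k-element combinations of 1..n by backtracking. A runnable restatement, assuming the call-site names (`generate_all_combinations`, `create_all_state`) are the intended function names:

```python
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment, total_number, level, current_list, total_list):
    if level == 0:  # one full combination collected
        total_list.append(current_list[:])
        return
    # Leave room for the `level` elements that still have to be picked.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()  # backtrack


print(generate_all_combinations(4, 2))
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
```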
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
24
1
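The tests above check that one `transpose` helper accepts NumPy, PyTorch, TensorFlow, and JAX inputs alike. A minimal sketch of that dispatch idea covering just NumPy and PyTorch; the full helper in `transformers.utils` also handles TF and JAX:

```python
import numpy as np


def transpose(array, axes=None):
    """Dispatch on the tensor type and call the matching backend op."""
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch

        if isinstance(array, torch.Tensor):
            if axes is None:  # mirror NumPy's default: reverse all dims
                return array.permute(*reversed(range(array.ndim)))
            return array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")


x = np.random.randn(3, 4, 5)
assert transpose(x, axes=(1, 2, 0)).shape == (4, 5, 3)
```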
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def UpperCAmelCase ( A : float , A : float ): '''simple docstring''' if inductance <= 0: raise ValueError('Inductance cannot be 0 or negative' ) elif capacitance <= 0: raise ValueError('Capacitance cannot be 0 or negative' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
1
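The function above implements the resonant frequency of an ideal LC circuit, f = 1 / (2 * pi * sqrt(L * C)). A small worked example with concrete component values:

```python
from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> float:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    if capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    return 1 / (2 * pi * sqrt(inductance * capacitance))


# A 10 mH inductor with a 100 nF capacitor resonates near 5.03 kHz.
print(resonant_frequency(10e-3, 100e-9))  # ~5032.9 Hz
```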
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=2 , snake_case=True , snake_case=False , snake_case=10 , snake_case=3 , snake_case=32 * 4 , snake_case=32 * 6 , snake_case=4 , snake_case=32 , ) -> Optional[Any]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCamelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case ) , config.decoder_config.decoder_layers ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=False ) -> int: with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case , pixel_mask=snake_case ) _UpperCAmelCase = model(snake_case , output_hidden_states=snake_case ) # the correct shape of output.transformer_decoder_hidden_states 
ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str: _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case ) model.to(snake_case ) model.eval() def comm_check_on_output(snake_case ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case , pixel_mask=snake_case ) _UpperCAmelCase = model(snake_case ) comm_check_on_output(snake_case ) _UpperCAmelCase = model( pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case ) comm_check_on_output(snake_case ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: self.config_tester.run_common_tests() def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case , **snake_case , output_hidden_states=snake_case ) def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case ) @unittest.skip(reason='MaskFormer does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='MaskFormer is not a generative model' ) def lowerCamelCase_ ( self ) -> Any: pass @unittest.skip(reason='MaskFormer does not use token embeddings' ) def lowerCamelCase_ ( self ) -> Tuple: pass @require_torch_multi_gpu @unittest.skip( reason='MaskFormer has some layers using `add_module` which doesn\'t work 
well with `nn.DataParallel`' ) def lowerCamelCase_ ( self ) -> List[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=snake_case ), 'mask_labels': torch.randn((2, 10, *size) , device=snake_case ), 'class_labels': torch.zeros(2 , 10 , device=snake_case ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case ) _UpperCAmelCase = model(**snake_case ) self.assertTrue(outputs.loss is not None ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case , **snake_case , output_hidden_states=snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ).to(snake_case ) _UpperCAmelCase = model(**snake_case , output_attentions=snake_case ) self.assertTrue(outputs.attentions is not None ) def lowerCamelCase_ ( self ) -> Any: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.train() _UpperCAmelCase = model(snake_case , mask_labels=snake_case , class_labels=snake_case ).loss loss.backward() def lowerCamelCase_ ( self ) -> Dict: # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.train() _UpperCAmelCase = model(snake_case , mask_labels=snake_case , class_labels=snake_case ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowercase = 1E-4 def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' ) if is_vision_available() else None ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case ) _UpperCAmelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) _UpperCAmelCase = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) ) _UpperCAmelCase = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) ) _UpperCAmelCase = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case , atol=snake_case ) ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(snake_case ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case ) _UpperCAmelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] _UpperCAmelCase = torch.tensor(snake_case ).to(snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, -5.12_63E00], ] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case ) 
) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' ) .to(snake_case ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case ) _UpperCAmelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _UpperCAmelCase = torch.tensor(snake_case ).to(snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case ) ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(snake_case ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) _UpperCAmelCase = inputs['pixel_values'].to(snake_case ) _UpperCAmelCase = [el.to(snake_case ) for el in inputs['mask_labels']] _UpperCAmelCase = [el.to(snake_case ) for el in inputs['class_labels']] with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) self.assertTrue(outputs.loss is not None )
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowercase = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowercase = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowercase = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 
4S 8S 2S''', True), ) lowercase = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowercase = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]), ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowercase = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowercase = ( ('''JH AH TH KH QH''', 23), ('''JH 9H TH KH QH''', 22), ('''JC KH JS JD JH''', 21), ('''KH KC 3S 3H 3D''', 20), ('''8C 9C 5C 3C TC''', 19), ('''JS QS 9H TS KH''', 18), ('''7C 7S KH 2H 7H''', 17), ('''3C KH 5D 5S KH''', 16), ('''QH 8H KD JH 8S''', 15), ('''2D 6D 9D TH 7D''', 14), ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = randrange(len(A ) ), randrange(len(A ) ) _UpperCAmelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] _UpperCAmelCase , _UpperCAmelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def UpperCAmelCase ( A : int = 100 ): '''simple docstring''' return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('hand, expected' , A ) def UpperCAmelCase ( A : Tuple , A : str ): '''simple docstring''' assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A ) def UpperCAmelCase ( A : Optional[Any] , A : Optional[int] ): '''simple docstring''' assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A ) def UpperCAmelCase ( A : Optional[int] , A : Dict , A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A ) def UpperCAmelCase ( A : Tuple , A : str ): '''simple docstring''' assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A ) def UpperCAmelCase ( A : Optional[int] , A : Tuple ): '''simple docstring''' assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A ) def UpperCAmelCase ( A : str , A : List[str] , A : Any ): '''simple docstring''' assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def UpperCAmelCase ( A : str , A : Dict , A : Tuple ): '''simple docstring''' assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = [PokerHand(A ) for hand in SORTED_HANDS] _UpperCAmelCase = poker_hands.copy() shuffle(A ) _UpperCAmelCase = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = PokerHand('2C 4S AS 3D 5C' ) _UpperCAmelCase = True _UpperCAmelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def UpperCAmelCase ( ): '''simple 
docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = os.path.abspath(os.path.dirname(A ) ) _UpperCAmelCase = os.path.join(A , 'poker_hands.txt' ) with open(A ) as file_hand: for line in file_hand: _UpperCAmelCase = line[:14].strip() _UpperCAmelCase = line[15:].strip() _UpperCAmelCase , _UpperCAmelCase = PokerHand(A ), PokerHand(A ) _UpperCAmelCase = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
1
"""simple docstring""" def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' _UpperCAmelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def UpperCAmelCase ( ): '''simple docstring''' print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
24
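# Editor's sketch (not part of the dataset record above): a quick sanity check
# of the closed-form formula; `arithmetic_series_sum` is a hypothetical local
# re-statement of the same S = n/2 * (2a + (n - 1)d) identity.
def arithmetic_series_sum(first_term: float, common_diff: float, num_of_terms: int) -> float:
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)


assert arithmetic_series_sum(1, 1, 10) == 55.0               # 1 + 2 + ... + 10
assert arithmetic_series_sum(2, 3, 4) == sum([2, 5, 8, 11])  # 26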
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" import random def UpperCAmelCase ( A : Optional[Any] , A : Any , A : str ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , A ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def UpperCAmelCase ( A : Dict , A : str , A : str ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(A , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(A , A , A ) quick_sort_random( A , A , A ) # recursive quicksort to the left of the pivot point quick_sort_random( A , pivot_index + 1 , A ) # recursive quicksort to the right of the pivot point def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = input('Enter numbers separated by a comma:\n' ).strip() _UpperCAmelCase = [int(A ) for item in user_input.split(',' )] quick_sort_random(A , 0 , len(A ) ) print(A ) if __name__ == "__main__": main()
24
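# Editor's sketch (assumes quick_sort_random from the record above is in scope):
# the sort works in place over the half-open index range [left, right).
nums = [5, 2, 9, 1, 5, 6]
quick_sort_random(nums, 0, len(nums))
assert nums == [1, 2, 5, 5, 6, 9]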
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule lowercase = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
24
1
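# Editor's sketch: the divisibility property checked above, restated on the
# known example 1406357289 from Project Euler 43 (`has_substring_divisibility`
# is a hypothetical helper, not part of the dataset).
def has_substring_divisibility(digits: str) -> bool:
    primes = [2, 3, 5, 7, 11, 13, 17]
    # d2d3d4 % 2, d3d4d5 % 3, ..., d8d9d10 % 17 must all be zero
    return all(int(digits[i + 1 : i + 4]) % p == 0 for i, p in enumerate(primes))


assert has_substring_divisibility("1406357289")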
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.dirname(os.path.realpath(A ) ) _UpperCAmelCase = os.path.join(A , 'triangle.txt' ) with open(A ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [] for line in triangle: _UpperCAmelCase = [] for number in line.strip().split(' ' ): numbers_from_line.append(int(A ) ) a.append(A ) for i in range(1 , len(A ) ): for j in range(len(a[i] ) ): _UpperCAmelCase = a[i - 1][j] if j != len(a[i - 1] ) else 0 _UpperCAmelCase = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(A , A ) return max(a[-1] ) if __name__ == "__main__": print(solution())
24
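# Editor's sketch: the same top-down accumulation as solution() above, on the
# 4-row example from Project Euler 18 (best path 3 -> 7 -> 4 -> 9 = 23).
# `max_triangle_path` is a hypothetical helper name.
def max_triangle_path(triangle: list) -> int:
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            right_parent = a[i - 1][j] if j != len(a[i - 1]) else 0
            left_parent = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(left_parent, right_parent)
    return max(a[-1])


assert max_triangle_path([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23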
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import math def UpperCAmelCase ( A : list , A : int ): '''simple docstring''' _UpperCAmelCase = len(A ) _UpperCAmelCase = int(math.floor(math.sqrt(A ) ) ) _UpperCAmelCase = 0 while arr[min(A , A ) - 1] < x: _UpperCAmelCase = step step += int(math.floor(math.sqrt(A ) ) ) if prev >= n: return -1 while arr[prev] < x: _UpperCAmelCase = prev + 1 if prev == min(A , A ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowercase = input('''Enter numbers separated by a comma:\n''').strip() lowercase = [int(item) for item in user_input.split(''',''')] lowercase = int(input('''Enter the number to be searched:\n''')) lowercase = jump_search(arr, x) if res == -1: print('''Number not found!''') else: print(F'''Number {x} is at index {res}''')
24
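# Editor's sketch (assumes jump_search from the record above is in scope):
# the input must be sorted; probes land every ~sqrt(n) elements, then a linear
# scan finishes inside one block, giving O(sqrt(n)) comparisons overall.
sorted_arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
assert jump_search(sorted_arr, 21) == 8
assert jump_search(sorted_arr, 4) == -1  # absent values return -1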
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=None , snake_case=None ) -> List[str]: _UpperCAmelCase = data _UpperCAmelCase = previous _UpperCAmelCase = next_node def __str__( self ) -> str: return f'{self.data}' def lowerCamelCase_ ( self ) -> int: return self.data def lowerCamelCase_ ( self ) -> Optional[Any]: return self.next def lowerCamelCase_ ( self ) -> Any: return self.previous class lowercase__ : '''simple docstring''' def __init__( self , snake_case ) -> Tuple: _UpperCAmelCase = head def __iter__( self ) -> str: return self def lowerCamelCase_ ( self ) -> Any: if not self.current: raise StopIteration else: _UpperCAmelCase = self.current.get_data() _UpperCAmelCase = self.current.get_next() return value class lowercase__ : '''simple docstring''' def __init__( self ) -> Union[str, Any]: _UpperCAmelCase = None # First node in list _UpperCAmelCase = None # Last node in list def __str__( self ) -> Optional[Any]: _UpperCAmelCase = self.head _UpperCAmelCase = [] while current is not None: nodes.append(current.get_data() ) _UpperCAmelCase = current.get_next() return " ".join(str(snake_case ) for node in nodes ) def __contains__( self , snake_case ) -> List[str]: _UpperCAmelCase = self.head while current: if current.get_data() == value: return True _UpperCAmelCase = current.get_next() return False def __iter__( self ) -> Tuple: return LinkedListIterator(self.head ) def lowerCamelCase_ ( self ) -> Tuple: if self.head: return self.head.get_data() return None def lowerCamelCase_ ( self ) -> Union[str, Any]: if self.tail: return self.tail.get_data() return None def lowerCamelCase_ ( self , snake_case ) -> None: if self.head is None: _UpperCAmelCase = node _UpperCAmelCase = node else: self.insert_before_node(self.head , snake_case ) def lowerCamelCase_ ( self , snake_case ) -> None: if self.head is None: self.set_head(snake_case ) else: self.insert_after_node(self.tail , snake_case ) def lowerCamelCase_ ( self , snake_case ) -> None: _UpperCAmelCase = Node(snake_case ) if self.head is None: self.set_head(snake_case ) else: self.set_tail(snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case ) -> None: _UpperCAmelCase = node _UpperCAmelCase = node.previous if node.get_previous() is None: _UpperCAmelCase = node_to_insert else: _UpperCAmelCase = node_to_insert _UpperCAmelCase = node_to_insert def lowerCamelCase_ ( self , snake_case , snake_case ) -> None: _UpperCAmelCase = node _UpperCAmelCase = node.next if node.get_next() is None: _UpperCAmelCase = node_to_insert else: _UpperCAmelCase = node_to_insert _UpperCAmelCase = node_to_insert def lowerCamelCase_ ( self , snake_case , snake_case ) -> None: _UpperCAmelCase = 1 _UpperCAmelCase = Node(snake_case ) _UpperCAmelCase = self.head while node: if current_position == position: self.insert_before_node(snake_case , snake_case ) return current_position += 1 _UpperCAmelCase = node.next self.insert_after_node(self.tail , snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Node: _UpperCAmelCase = self.head while node: if node.get_data() == item: return node _UpperCAmelCase = node.get_next() raise Exception('Node not found' ) def lowerCamelCase_ ( self , snake_case ) -> Tuple: if (node := self.get_node(snake_case )) is not None: if node == self.head: _UpperCAmelCase = self.head.get_next() if node == self.tail: _UpperCAmelCase = self.tail.get_previous() self.remove_node_pointers(snake_case ) @staticmethod def lowerCamelCase_ ( snake_case ) -> None: if node.get_next(): 
_UpperCAmelCase = node.previous if node.get_previous(): _UpperCAmelCase = node.next _UpperCAmelCase = None _UpperCAmelCase = None def lowerCamelCase_ ( self ) -> Any: return self.head is None def UpperCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' if divisor % 5 == 0 or divisor % 2 == 0: return 0 _UpperCAmelCase = 1 _UpperCAmelCase = 1 while repunit: _UpperCAmelCase = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def UpperCAmelCase ( A : int = 100_0000 ): '''simple docstring''' _UpperCAmelCase = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(A ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F'''{solution() = }''')
24
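# Editor's sketch (assumes least_divisible_repunit from the record above):
# this is A(n) from Project Euler 129, the length of the smallest repunit
# 11...1 divisible by n.
assert least_divisible_repunit(7) == 6    # 111111 = 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 = 41 * 271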
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def UpperCAmelCase ( A : Tuple ): '''simple docstring''' return x + 2 class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = 'x = 3' _UpperCAmelCase = {} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {'x': 3} ) _UpperCAmelCase = 'x = y' _UpperCAmelCase = {'y': 5} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case , {'x': 5, 'y': 5} ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = 'y = add_two(x)' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {'add_two': add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {'x': 3, 'y': 5} ) # Won't work without the tool with CaptureStdout() as out: _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) assert result is None assert "tried to execute add_two" in out.out def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = 'x = 3' _UpperCAmelCase = {} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {'x': 3} ) def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {'add_two': add_two} , state=snake_case ) self.assertDictEqual(snake_case , {'x': 3, 'y': 5} ) self.assertDictEqual(snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = 'x = 3\ny = 5' _UpperCAmelCase = {} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case , {'x': 3, 'y': 5} ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = 'text = f\'This is x: {x}.\'' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(snake_case , {'x': 3, 'text': 'This is x: 3.'} ) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = 'if x <= 3:\n y = 2\nelse:\n y = 5' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(snake_case , {'x': 3, 'y': 2} ) _UpperCAmelCase = {'x': 8} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(snake_case , {'x': 8, 'y': 5} ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = 'test_list = [x, add_two(x)]' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {'add_two': add_two} , state=snake_case ) self.assertListEqual(snake_case , [3, 5] ) self.assertDictEqual(snake_case , {'x': 3, 'test_list': [3, 5]} ) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = 'y = x' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {'x': 3, 'y': 3} ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = 'test_list = [x, add_two(x)]\ntest_list[1]' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {'add_two': add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {'x': 3, 'test_list': [3, 5]} ) _UpperCAmelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']' _UpperCAmelCase = {'x': 3} _UpperCAmelCase = evaluate(snake_case , {'add_two': add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = 'x = 0\nfor i in range(3):\n x = i' _UpperCAmelCase = {} _UpperCAmelCase = evaluate(snake_case , {'range': range} , state=snake_case ) assert result == 2 self.assertDictEqual(snake_case , {'x': 2, 'i': 2} )
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = int(A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = t // 3600, (t // 60) % 60, t % 60 return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}' def UpperCAmelCase ( A : Any , A : List[Any] , A : List[Any] , A : Union[str, Any] , A : Optional[Any]=300 ): '''simple docstring''' return f'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n ' def UpperCAmelCase ( A : Any ): '''simple docstring''' _UpperCAmelCase = '<table border="1" class="dataframe">\n' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f' <th>{i}</th>\n' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _UpperCAmelCase = f'{elt:.6f}' if isinstance(A , A ) else str(A ) html_code += f' <td>{elt}</td>\n' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class lowercase__ : '''simple docstring''' _UpperCAmelCase = 5 _UpperCAmelCase = 0.2 def __init__( self , snake_case , snake_case = None , snake_case = True , snake_case = None , snake_case = 300 , ) -> Any: _UpperCAmelCase = total _UpperCAmelCase = '' if prefix is None else prefix _UpperCAmelCase = leave _UpperCAmelCase = parent _UpperCAmelCase = width _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None def lowerCamelCase_ ( self , snake_case , snake_case = False , snake_case = None ) -> Tuple: _UpperCAmelCase = value if comment is not None: _UpperCAmelCase = comment if self.last_value is None: _UpperCAmelCase = _UpperCAmelCase = time.time() _UpperCAmelCase = _UpperCAmelCase = value _UpperCAmelCase = _UpperCAmelCase = None _UpperCAmelCase = self.warmup _UpperCAmelCase = 1 self.update_bar(snake_case ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 _UpperCAmelCase = time.time() _UpperCAmelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _UpperCAmelCase = self.elapsed_time / (value - self.start_value) else: _UpperCAmelCase = None if value >= self.total: _UpperCAmelCase = self.total _UpperCAmelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: _UpperCAmelCase = self.average_time_per_item * (self.total - value) self.update_bar(snake_case ) _UpperCAmelCase = value _UpperCAmelCase = current_time if self.average_time_per_item is None: _UpperCAmelCase = 1 else: _UpperCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def lowerCamelCase_ ( self , snake_case , snake_case=None ) -> int: _UpperCAmelCase = ' ' * (len(str(self.total ) ) - len(str(snake_case ) )) + str(snake_case ) if self.elapsed_time is None: _UpperCAmelCase = f'[{spaced_value}/{self.total} : < :' elif self.predicted_remaining is None: _UpperCAmelCase = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}' else: _UpperCAmelCase = ( f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <' f' {format_time(self.predicted_remaining )}' ) self.label += f', {1/self.average_time_per_item:.2f} it/s' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f', {self.comment}]' self.display() def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def lowerCamelCase_ ( self ) -> Optional[Any]: if self.parent is None and self.output is not None: self.output.update(disp.HTML('' ) ) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case=None ) -> Union[str, Any]: super().__init__(snake_case ) _UpperCAmelCase = None if column_names is None else [column_names] _UpperCAmelCase = None def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def lowerCamelCase_ ( self , snake_case ) -> List[str]: if self.inner_table is None: _UpperCAmelCase = [list(values.keys() ), list(values.values() )] else: _UpperCAmelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(snake_case ) _UpperCAmelCase = columns self.inner_table.append([values[c] for c in columns] ) def lowerCamelCase_ ( self , snake_case , snake_case=None , snake_case=300 ) -> Dict: _UpperCAmelCase = NotebookProgressBar(snake_case , prefix=snake_case , parent=self , width=snake_case ) return self.child_bar def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = None self.display() class lowercase__ ( A ): '''simple docstring''' def __init__( self ) -> Tuple: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = False def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , **snake_case ) -> int: _UpperCAmelCase = 
'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step' _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = [self.first_column] + ['Training Loss'] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('Validation Loss' ) _UpperCAmelCase = NotebookTrainingTracker(state.max_steps , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , **snake_case ) -> Union[str, Any]: _UpperCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f'{state.epoch:.2f}' self.training_tracker.update( state.global_step + 1 , comment=f'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , ) _UpperCAmelCase = False def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Dict: if not has_length(snake_case ): return if self.prediction_bar is None: if self.training_tracker is not None: _UpperCAmelCase = self.training_tracker.add_child(len(snake_case ) ) else: _UpperCAmelCase = NotebookProgressBar(len(snake_case ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , **snake_case ) -> List[str]: if self.prediction_bar is not None: self.prediction_bar.close() _UpperCAmelCase = None def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Union[str, Any]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _UpperCAmelCase = {'Training Loss': logs['loss']} # First column is necessarily Step sine we're not in epoch eval strategy _UpperCAmelCase = state.global_step self.training_tracker.write_line(snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> Tuple: if self.training_tracker is not None: _UpperCAmelCase = {'Training Loss': 'No log', 'Validation Loss': 'No log'} for log in reversed(state.log_history ): if "loss" in log: _UpperCAmelCase = log['loss'] break if self.first_column == "Epoch": _UpperCAmelCase = int(state.epoch ) else: _UpperCAmelCase = state.global_step _UpperCAmelCase = 'eval' for k in metrics: if k.endswith('_loss' ): _UpperCAmelCase = re.sub(r'\_loss$' , '' , snake_case ) _UpperCAmelCase = metrics.pop('total_flos' , snake_case ) _UpperCAmelCase = metrics.pop('epoch' , snake_case ) _UpperCAmelCase = metrics.pop(f'{metric_key_prefix}_runtime' , snake_case ) _UpperCAmelCase = metrics.pop(f'{metric_key_prefix}_samples_per_second' , snake_case ) _UpperCAmelCase = metrics.pop(f'{metric_key_prefix}_steps_per_second' , snake_case ) _UpperCAmelCase = metrics.pop(f'{metric_key_prefix}_jit_compilation_time' , snake_case ) for k, v in metrics.items(): if k == f'{metric_key_prefix}_loss': _UpperCAmelCase = v else: _UpperCAmelCase = k.split('_' ) _UpperCAmelCase = ' '.join([part.capitalize() for part in splits[1:]] ) _UpperCAmelCase = v self.training_tracker.write_line(snake_case ) self.training_tracker.remove_child() _UpperCAmelCase = None # Evaluation takes a long time so we should force the next update. _UpperCAmelCase = True def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , **snake_case ) -> Any: self.training_tracker.update( state.global_step , comment=f'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=snake_case ) _UpperCAmelCase = None
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
"""simple docstring""" import random def UpperCAmelCase ( A : list , A : List[Any] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], [] for element in data: if element < pivot: less.append(A ) elif element > pivot: greater.append(A ) else: equal.append(A ) return less, equal, greater def UpperCAmelCase ( A : list , A : int ): '''simple docstring''' if index >= len(A ) or index < 0: return None _UpperCAmelCase = items[random.randint(0 , len(A ) - 1 )] _UpperCAmelCase = 0 _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _partition(A , A ) _UpperCAmelCase = len(A ) _UpperCAmelCase = len(A ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(A , A ) # must be in larger else: return quick_select(A , index - (m + count) )
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''gpt_neo''' _UpperCAmelCase = ['''past_key_values'''] _UpperCAmelCase = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , snake_case=50257 , snake_case=2048 , snake_case=2048 , snake_case=24 , snake_case=[[["global", "local"], 12]] , snake_case=16 , snake_case=None , snake_case=256 , snake_case="gelu_new" , snake_case=0.0 , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case=1E-5 , snake_case=0.02 , snake_case=True , snake_case=50256 , snake_case=50256 , **snake_case , ) -> Any: _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_layers _UpperCAmelCase = num_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = window_size _UpperCAmelCase = activation_function _UpperCAmelCase = resid_dropout _UpperCAmelCase = embed_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = classifier_dropout _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id _UpperCAmelCase = attention_types _UpperCAmelCase = self.expand_attention_types_params(snake_case ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' f'`config.num_layers = {self.num_layers}`. ' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) @staticmethod def lowerCamelCase_ ( snake_case ) -> Optional[int]: _UpperCAmelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def UpperCAmelCase ( A : Optional[int] , A : int , A : Any , A : Optional[int] ): '''simple docstring''' import torch _UpperCAmelCase = input.size() _UpperCAmelCase = len(A ) _UpperCAmelCase = shape[dimension] _UpperCAmelCase = torch.arange(0 , A , A ) _UpperCAmelCase = torch.div(sizedim - size , A , rounding_mode='floor' ) + 1 _UpperCAmelCase = torch.arange(A ) + low_indices[:min_length][:, None] _UpperCAmelCase = [slice(A )] * rank _UpperCAmelCase = indices _UpperCAmelCase = input[s] _UpperCAmelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A ) def UpperCAmelCase ( A : Dict , A : Dict ): '''simple docstring''' import torch _UpperCAmelCase = torch.arange(1 , A ) _UpperCAmelCase = torch.remainder(A , A ) _UpperCAmelCase = remainders == 0 _UpperCAmelCase = candidates[divisor_indices] _UpperCAmelCase = torch.max(A ) return largest_divisor, torch.div(A , A , rounding_mode='floor' ) class lowercase__ ( A ): '''simple docstring''' @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: _UpperCAmelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case , direction='inputs' ) _UpperCAmelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCamelCase_ ( self ) -> int: return self._config.num_heads def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]: _UpperCAmelCase = super(snake_case , self ).generate_dummy_inputs( snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case ) # We need to order the input in the way they appears in the forward() _UpperCAmelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values _UpperCAmelCase = seqlen + 2 _UpperCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCAmelCase = [ (torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(self.num_layers ) ] _UpperCAmelCase = common_inputs['attention_mask'] if self.use_past: _UpperCAmelCase = ordered_inputs['attention_mask'].dtype _UpperCAmelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 ) return ordered_inputs @property def lowerCamelCase_ ( self ) -> int: return 13
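# Usage sketch (illustrative): the static helper above (exposed upstream as
# GPTNeoConfig.expand_attention_types_params) unrolls the compact
# attention_types spec into one entry per layer, which is what the
# `len(attention_layers) == num_layers` check validates.
spec = [[["global", "local"], 12]]  # the default: alternate global/local over 24 layers
attentions = []
for item in spec:
    for _ in range(item[1]):
        attentions.extend(item[0])
assert len(attentions) == 24 and attentions[:4] == ["global", "local", "global", "local"]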
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for 
word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class lowercase__ ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = None , ) -> List[Any]: super().__init__() _UpperCAmelCase = initial_learning_rate _UpperCAmelCase = warmup_steps _UpperCAmelCase = power _UpperCAmelCase = decay_schedule_fn _UpperCAmelCase = name def __call__( self , snake_case ) -> Dict: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. _UpperCAmelCase = tf.cast(snake_case , tf.floataa ) _UpperCAmelCase = tf.cast(self.warmup_steps , tf.floataa ) _UpperCAmelCase = global_step_float / warmup_steps_float _UpperCAmelCase = self.initial_learning_rate * tf.math.pow(snake_case , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=snake_case , ) def lowerCamelCase_ ( self ) -> Optional[Any]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCAmelCase ( A : float , A : int , A : int , A : float = 0.0 , A : float = 0.9 , A : float = 0.999 , A : float = 1e-8 , A : Optional[float] = None , A : Optional[float] = None , A : float = 0.0 , A : float = 1.0 , A : Optional[List[str]] = None , ): '''simple docstring''' _UpperCAmelCase = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=A , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=A , ) if num_warmup_steps: _UpperCAmelCase = WarmUp( initial_learning_rate=A , decay_schedule_fn=A , warmup_steps=A , ) if weight_decay_rate > 0.0: _UpperCAmelCase = AdamWeightDecay( learning_rate=A , weight_decay_rate=A , beta_a=A , beta_a=A , epsilon=A , clipnorm=A , global_clipnorm=A , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=A , ) else: _UpperCAmelCase = tf.keras.optimizers.Adam( learning_rate=A , beta_a=A , beta_a=A , epsilon=A , clipnorm=A , global_clipnorm=A , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case = 0.001 , snake_case = 0.9 , snake_case = 0.999 , snake_case = 1E-7 , snake_case = False , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "AdamWeightDecay" , **snake_case , ) -> Any: super().__init__(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case ) _UpperCAmelCase = weight_decay_rate _UpperCAmelCase = include_in_weight_decay _UpperCAmelCase = exclude_from_weight_decay @classmethod def lowerCamelCase_ ( cls , snake_case ) -> List[Any]: _UpperCAmelCase = {'WarmUp': WarmUp} return super(snake_case , cls ).from_config(snake_case , custom_objects=snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> int: super(snake_case , self )._prepare_local(snake_case , snake_case , snake_case ) _UpperCAmelCase = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCamelCase_ ( self , snake_case , snake_case=None , **snake_case ) -> Any: _UpperCAmelCase , _UpperCAmelCase = list(zip(*snake_case ) ) return super(snake_case , self ).apply_gradients(zip(snake_case , snake_case ) , name=snake_case , **snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Tuple: if apply_state is None: return self._decayed_lr_t[var_dtype], {} _UpperCAmelCase = apply_state or {} _UpperCAmelCase = apply_state.get((var_device, var_dtype) ) if coefficients is None: _UpperCAmelCase = self._fallback_apply_state(snake_case , snake_case ) _UpperCAmelCase = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None ) -> int: _UpperCAmelCase , _UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , snake_case ) _UpperCAmelCase = self._decay_weights_op(snake_case , snake_case , snake_case ) with tf.control_dependencies([decay] ): return super(snake_case , self )._resource_apply_dense(snake_case , snake_case , **snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=None ) -> List[str]: _UpperCAmelCase , _UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , snake_case ) _UpperCAmelCase = self._decay_weights_op(snake_case , snake_case , snake_case ) with tf.control_dependencies([decay] ): return super(snake_case , self )._resource_apply_sparse(snake_case , snake_case , snake_case , **snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCamelCase_ ( self , snake_case ) -> str: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(snake_case , snake_case ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(snake_case , snake_case ) is not None: return False return True class lowercase__ ( A ): '''simple docstring''' def __init__( self ) -> Optional[int]: _UpperCAmelCase = [] _UpperCAmelCase = None @property def lowerCamelCase_ ( self ) -> Union[str, Any]: if 
self._accum_steps is None: _UpperCAmelCase = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCamelCase_ ( self ) -> Optional[int]: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , snake_case ) -> List[Any]: if not self._gradients: _UpperCAmelCase = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(snake_case ) , trainable=snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(snake_case ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(snake_case )}' ) for accum_gradient, gradient in zip(self._gradients , snake_case ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(snake_case ) self._accum_steps.assign_add(1 ) def lowerCamelCase_ ( self ) -> Tuple: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(snake_case ) )
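# Usage sketch (illustrative; assumes the WarmUp schedule defined above is in
# scope and TensorFlow is installed — the keyword names follow the upstream
# transformers signature): the learning rate ramps linearly to its peak over
# `warmup_steps`, then follows the wrapped decay schedule, which is evaluated
# at `step - warmup_steps`.
import tensorflow as tf

decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-5, decay_steps=900, end_learning_rate=0.0
)
schedule = WarmUp(initial_learning_rate=5e-5, decay_schedule_fn=decay, warmup_steps=100)
# schedule(50) is roughly half the peak rate; schedule(1000) has decayed to ~0.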
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''umt5''' _UpperCAmelCase = ['''past_key_values'''] def __init__( self , snake_case=250112 , snake_case=512 , snake_case=64 , snake_case=1024 , snake_case=8 , snake_case=None , snake_case=6 , snake_case=32 , snake_case=128 , snake_case=0.1 , snake_case=1E-6 , snake_case=1.0 , snake_case="gated-gelu" , snake_case=True , snake_case=True , snake_case="T5Tokenizer" , snake_case=True , snake_case=0 , snake_case=1 , snake_case=0 , **snake_case , ) -> List[Any]: super().__init__( is_encoder_decoder=snake_case , tokenizer_class=snake_case , tie_word_embeddings=snake_case , pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , **snake_case , ) _UpperCAmelCase = vocab_size _UpperCAmelCase = d_model _UpperCAmelCase = d_kv _UpperCAmelCase = d_ff _UpperCAmelCase = num_layers _UpperCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _UpperCAmelCase = num_heads _UpperCAmelCase = relative_attention_num_buckets _UpperCAmelCase = relative_attention_max_distance _UpperCAmelCase = dropout_rate _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_factor _UpperCAmelCase = feed_forward_proj _UpperCAmelCase = use_cache _UpperCAmelCase = self.feed_forward_proj.split('-' ) _UpperCAmelCase = act_info[-1] _UpperCAmelCase = act_info[0] == 'gated' if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2: raise ValueError( f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) if feed_forward_proj == "gated-gelu": _UpperCAmelCase = 'gelu_new' @property def lowerCamelCase_ ( self ) -> Union[str, Any]: return self.d_model @property def lowerCamelCase_ ( self ) -> Tuple: return self.num_heads @property def lowerCamelCase_ ( self ) -> Optional[int]: return self.num_layers class lowercase__ ( A ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: _UpperCAmelCase = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: _UpperCAmelCase = 'past_encoder_sequence + sequence' _UpperCAmelCase = {0: 'batch'} _UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'} _UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(snake_case , direction='inputs' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def lowerCamelCase_ ( self ) -> int: return 13 @property def lowerCamelCase_ ( self ) -> float: return 5E-4
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def UpperCAmelCase ( A : Union[str, Any] , A : Dict , A : int , A : Union[str, Any] , A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors _UpperCAmelCase = load_file(A ) _UpperCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: _UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) _UpperCAmelCase = pipeline.text_encoder else: _UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) _UpperCAmelCase = pipeline.unet # find the target layer _UpperCAmelCase = layer_infos.pop(0 ) while len(A ) > -1: try: _UpperCAmelCase = curr_layer.__getattr__(A ) if len(A ) > 0: _UpperCAmelCase = layer_infos.pop(0 ) elif len(A ) == 0: break except Exception: if len(A ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: _UpperCAmelCase = layer_infos.pop(0 ) _UpperCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(A ) else: pair_keys.append(A ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: _UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) _UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ).unsqueeze(2 ).unsqueeze(3 ) else: _UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) _UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ) # update visited list for item in pair_keys: visited.append(A ) return pipeline if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument( '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.''' ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors''' ) parser.add_argument( '''--lora_prefix_text_encoder''', default='''lora_te''', type=str, help='''The prefix of text encoder weight in safetensors''', ) parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''') parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''' ) parser.add_argument('''--device''', type=str, help='''Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)''') lowercase = parser.parse_args() lowercase = args.base_model_path lowercase = args.checkpoint_path lowercase = args.dump_path lowercase = args.lora_prefix_unet lowercase = args.lora_prefix_text_encoder lowercase = args.alpha lowercase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowercase = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
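# The core update performed above is W <- W0 + alpha * (up @ down). A minimal
# standalone sketch of the same merge on one linear weight (illustrative only;
# the shapes and rank are made up):
import torch

weight = torch.randn(768, 768)           # frozen base weight W0
lora_down = torch.randn(4, 768) * 0.01   # low-rank factor, rank r = 4
lora_up = torch.randn(768, 4) * 0.01
alpha = 0.75
merged = weight + alpha * (lora_up @ lora_down)  # same shape as W0
assert merged.shape == weight.shape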
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' 
) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
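# Usage sketch (illustrative; these module-level helpers back the public
# datasets.interleave_datasets / datasets.concatenate_datasets API):
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)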
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowercase = 5_00_00 lowercase = 50_00 lowercase , lowercase = os.path.split(__file__) lowercase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def UpperCAmelCase ( A : datasets.Dataset , A : Optional[Any] ): '''simple docstring''' for i in range(A ): _UpperCAmelCase = dataset[i] @get_duration def UpperCAmelCase ( A : datasets.Dataset , A : Optional[Any] , A : List[Any] ): '''simple docstring''' for i in range(0 , len(A ) , A ): _UpperCAmelCase = dataset[i : i + batch_size] @get_duration def UpperCAmelCase ( A : datasets.Dataset , A : List[Any] , A : Dict ): '''simple docstring''' with dataset.formatted_as(type=A ): for i in range(A ): _UpperCAmelCase = dataset[i] @get_duration def UpperCAmelCase ( A : datasets.Dataset , A : Union[str, Any] , A : int , A : Optional[Any] ): '''simple docstring''' with dataset.formatted_as(type=A ): for i in range(0 , A , A ): _UpperCAmelCase = dataset[i : i + batch_size] def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = {'num examples': SPEED_TEST_N_EXAMPLES} _UpperCAmelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] _UpperCAmelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) _UpperCAmelCase = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) _UpperCAmelCase = generate_example_dataset( os.path.join(A , 'dataset.arrow' ) , A , num_examples=A , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(A ) ) _UpperCAmelCase = func(A , **A ) print('shuffling dataset' ) _UpperCAmelCase = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(A ) ) _UpperCAmelCase = func( A , **A ) with open(A , 'wb' ) as f: f.write(json.dumps(A ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , 
do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
"""simple docstring""" def UpperCAmelCase ( A : Any , A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = [1] for i in range(2 , A ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" _UpperCAmelCase = [] _UpperCAmelCase = list(range(A ) ) # Find permutation while factorials: _UpperCAmelCase = factorials.pop() _UpperCAmelCase , _UpperCAmelCase = divmod(A , A ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
"""simple docstring""" import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def UpperCAmelCase ( A : Union[dict, list, tuple, torch.Tensor] ): '''simple docstring''' _UpperCAmelCase = [] if isinstance(A , A ): for v in tree.values(): shapes.extend(_fetch_dims(A ) ) elif isinstance(A , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(A ) ) elif isinstance(A , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('Not supported' ) return shapes @torch.jit.ignore def UpperCAmelCase ( A : int , A : Tuple[int, ...] ): '''simple docstring''' _UpperCAmelCase = [] for d in reversed(A ): idx.append(flat_idx % d ) _UpperCAmelCase = flat_idx // d return tuple(reversed(A ) ) @torch.jit.ignore def UpperCAmelCase ( A : Sequence[int] , A : Sequence[int] , A : Sequence[int] , A : Optional[Sequence[bool]] = None , A : Optional[Sequence[bool]] = None , ): '''simple docstring''' def reduce_edge_list(A : List[bool] ) -> None: _UpperCAmelCase = True for i in range(len(A ) ): _UpperCAmelCase = -1 * (i + 1) l[reversed_idx] &= tally _UpperCAmelCase = l[reversed_idx] if start_edges is None: _UpperCAmelCase = [s == 0 for s in start] reduce_edge_list(A ) if end_edges is None: _UpperCAmelCase = [e == (d - 1) for e, d in zip(A , A )] reduce_edge_list(A ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(A ) == 0: return [()] elif len(A ) == 1: return [(slice(start[0] , end[0] + 1 ),)] _UpperCAmelCase = [] _UpperCAmelCase = [] # Dimensions common to start and end can be selected directly for s, e in zip(A , A ): if s == e: path_list.append(slice(A , s + 1 ) ) else: break _UpperCAmelCase = tuple(A ) _UpperCAmelCase = len(A ) # start == end, and we're done if divergence_idx == len(A ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _UpperCAmelCase = start[divergence_idx] return tuple( path + (slice(A , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _UpperCAmelCase = end[divergence_idx] return tuple( path + (slice(A , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , 
end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) _UpperCAmelCase = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def UpperCAmelCase ( A : torch.Tensor , A : int , A : int , A : int ): '''simple docstring''' _UpperCAmelCase = t.shape[:no_batch_dims] _UpperCAmelCase = list(_flat_idx_to_idx(A , A ) ) # _get_minimal_slice_set is inclusive _UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , A ) ) # Get an ordered list of slices to perform _UpperCAmelCase = _get_minimal_slice_set( A , A , A , ) _UpperCAmelCase = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def UpperCAmelCase ( A : Callable , A : Dict[str, Any] , A : int , A : int , A : bool = False , A : Any = None , A : bool = False , ): '''simple docstring''' if not (len(A ) > 0): raise ValueError('Must provide at least one input' ) _UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(A )] _UpperCAmelCase = tuple([max(A ) for s in zip(*A )] ) def _prep_inputs(A : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: _UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) _UpperCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: _UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t _UpperCAmelCase = tensor_tree_map(_prep_inputs , A ) _UpperCAmelCase = None if _out is not None: _UpperCAmelCase = tensor_tree_map(lambda A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) _UpperCAmelCase = 1 for d in orig_batch_dims: flat_batch_dim *= d _UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(A : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t _UpperCAmelCase = 0 _UpperCAmelCase = prepped_outputs for _ in range(A ): # Chunk the input if not low_mem: _UpperCAmelCase = _select_chunk else: _UpperCAmelCase = partial( _chunk_slice , flat_start=A , flat_end=min(A , i + chunk_size ) , no_batch_dims=len(A ) , ) _UpperCAmelCase = tensor_tree_map(A , A ) # Run the layer on the chunk _UpperCAmelCase = layer(**A ) # Allocate space for the output if out is None: _UpperCAmelCase = tensor_tree_map(lambda A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , A ) # Put the chunk in its pre-allocated space if isinstance(A , A ): def assign(A : dict , A : dict ) -> None: for k, v in da.items(): if isinstance(A , A ): assign(A , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: _UpperCAmelCase = da[k] assign(A , A ) elif isinstance(A , A ): for xa, xa in zip(A , A ): if _add_into_out: xa[i : i + chunk_size] += xa else: _UpperCAmelCase = xa elif isinstance(A , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: _UpperCAmelCase = output_chunk else: raise ValueError('Not supported' ) i += chunk_size _UpperCAmelCase = tensor_tree_map(lambda A : t.view(orig_batch_dims + t.shape[1:] ) , A ) return out class lowercase__ : '''simple docstring''' def __init__( self , snake_case = 512 , ) -> str: _UpperCAmelCase = max_chunk_size _UpperCAmelCase = None _UpperCAmelCase = None def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> 
int: logging.info('Tuning chunk size...' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size _UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] _UpperCAmelCase = [c for c in candidates if c > min_chunk_size] _UpperCAmelCase = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(snake_case ) -> bool: try: with torch.no_grad(): fn(*snake_case , chunk_size=snake_case ) return True except RuntimeError: return False _UpperCAmelCase = 0 _UpperCAmelCase = len(snake_case ) - 1 while i > min_viable_chunk_size_index: _UpperCAmelCase = test_chunk_size(candidates[i] ) if not viable: _UpperCAmelCase = (min_viable_chunk_size_index + i) // 2 else: _UpperCAmelCase = i _UpperCAmelCase = (i + len(snake_case ) - 1) // 2 return candidates[min_viable_chunk_size_index] def lowerCamelCase_ ( self , snake_case , snake_case ) -> bool: _UpperCAmelCase = True for aa, aa in zip(snake_case , snake_case ): assert type(snake_case ) == type(snake_case ) if isinstance(snake_case , (list, tuple) ): consistent &= self._compare_arg_caches(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] consistent &= self._compare_arg_caches(snake_case , snake_case ) else: consistent &= aa == aa return consistent def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , ) -> int: _UpperCAmelCase = True _UpperCAmelCase = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(snake_case ) _UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , snake_case ) else: # Otherwise, we can reuse the precomputed value _UpperCAmelCase = False if not consistent: _UpperCAmelCase = self._determine_favorable_chunk_size( snake_case , snake_case , snake_case , ) _UpperCAmelCase = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
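# Usage sketch (illustrative; the chunking entry point above is named
# chunk_layer in the upstream source — the dump obfuscates the name — and the
# toy layer and shapes here are made up): chunking trades peak memory for
# several forward passes over slices of the flattened batch dimensions, then
# reassembles the output.
import torch

def toy_layer(x=None):
    return {"out": x * 2}

x = torch.randn(64, 128)
out = chunk_layer(toy_layer, {"x": x}, chunk_size=16, no_batch_dims=1)
assert torch.allclose(out["out"], x * 2)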
24
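Editorial note: the row above chunks a layer call over flattened batch dimensions to bound peak memory. A minimal runnable sketch of that idea, assuming a simple layer that maps (n, ...) to (n, ...); chunk_apply is an illustrative name, not part of the row:

import torch

def chunk_apply(layer, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Run the layer one slice of the batch at a time, then stitch the results.
    outs = [layer(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)

out = chunk_apply(torch.nn.Linear(8, 4), torch.randn(100, 8), chunk_size=16)
assert out.shape == (100, 4)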
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
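Editorial note: a worked example of the Project Euler 89 round trip implemented above, assuming the two helpers keep their pre-obfuscation names parse_roman_numerals and generate_roman_numerals (both are referenced inside solution()):

assert parse_roman_numerals("XXXXVIIII") == 49  # 40 + 9, written with 9 symbols
assert generate_roman_numerals(49) == "XLIX"    # minimal form, 4 symbols
# this line alone saves 9 - 4 = 5 characters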
1
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def UpperCAmelCase ( A : List[Any] , A : List[str] , A : Optional[int] ): '''simple docstring''' if isinstance(A , torch.Tensor ): return image elif isinstance(A , PIL.Image.Image ): _UpperCAmelCase = [image] if isinstance(image[0] , PIL.Image.Image ): _UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _UpperCAmelCase = np.concatenate(A , axis=0 ) _UpperCAmelCase = np.array(A ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = 2.0 * image - 1.0 _UpperCAmelCase = torch.from_numpy(A ) elif isinstance(image[0] , torch.Tensor ): _UpperCAmelCase = torch.cat(A , dim=0 ) return image def UpperCAmelCase ( A : Optional[Any] , A : Optional[int] , A : Any , A : Dict=0.9995 ): '''simple docstring''' if not isinstance(A , np.ndarray ): _UpperCAmelCase = True _UpperCAmelCase = va.device _UpperCAmelCase = va.cpu().numpy() _UpperCAmelCase = va.cpu().numpy() _UpperCAmelCase = np.sum(va * va / (np.linalg.norm(A ) * np.linalg.norm(A )) ) if np.abs(A ) > DOT_THRESHOLD: _UpperCAmelCase = (1 - t) * va + t * va else: _UpperCAmelCase = np.arccos(A ) _UpperCAmelCase = np.sin(A ) _UpperCAmelCase = theta_a * t _UpperCAmelCase = np.sin(A ) _UpperCAmelCase = np.sin(theta_a - theta_t ) / sin_theta_a _UpperCAmelCase = sin_theta_t / sin_theta_a _UpperCAmelCase = sa * va + sa * va if inputs_are_torch: _UpperCAmelCase = torch.from_numpy(A ).to(A ) return va def UpperCAmelCase ( A : Union[str, Any] , A : str ): '''simple docstring''' _UpperCAmelCase = F.normalize(A , dim=-1 ) _UpperCAmelCase = F.normalize(A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def UpperCAmelCase ( A : List[str] , A : str ): '''simple docstring''' for param in model.parameters(): _UpperCAmelCase = value class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , ) -> Union[str, Any]: super().__init__() self.register_modules( vae=snake_case , text_encoder=snake_case , clip_model=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , feature_extractor=snake_case , coca_model=snake_case , coca_tokenizer=snake_case , coca_transform=snake_case , ) _UpperCAmelCase = ( feature_extractor.size if isinstance(feature_extractor.size , snake_case ) else feature_extractor.size['shortest_edge'] ) _UpperCAmelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , snake_case ) set_requires_grad(self.clip_model , snake_case ) def lowerCamelCase_ ( self , snake_case = "auto" ) -> Dict: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: self.enable_attention_slicing(snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: set_requires_grad(self.vae , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: set_requires_grad(self.vae , snake_case ) def lowerCamelCase_ ( self ) -> Dict: set_requires_grad(self.unet , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: set_requires_grad(self.unet , snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Any: # get the original timestep using init_timestep _UpperCAmelCase = min(int(num_inference_steps * strength ) , snake_case ) _UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) _UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ) -> int: if not isinstance(snake_case , torch.Tensor ): raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(snake_case )}' ) _UpperCAmelCase = image.to(device=snake_case , dtype=snake_case ) if isinstance(snake_case , snake_case ): _UpperCAmelCase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case ) ] _UpperCAmelCase = torch.cat(snake_case , dim=0 ) else: _UpperCAmelCase = self.vae.encode(snake_case ).latent_dist.sample(snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor _UpperCAmelCase = 0.18215 * init_latents _UpperCAmelCase = init_latents.repeat_interleave(snake_case , dim=0 ) _UpperCAmelCase = randn_tensor(init_latents.shape , generator=snake_case , device=snake_case , dtype=snake_case ) # get latents _UpperCAmelCase = self.scheduler.add_noise(snake_case , snake_case , snake_case ) _UpperCAmelCase = init_latents return latents def lowerCamelCase_ ( self , snake_case ) -> Optional[int]: _UpperCAmelCase = self.coca_transform(snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _UpperCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _UpperCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowerCamelCase_ ( self , snake_case , snake_case ) -> Tuple: _UpperCAmelCase = self.feature_extractor.preprocess(snake_case ) _UpperCAmelCase = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() _UpperCAmelCase = self.clip_model.get_image_features(snake_case ) _UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case ) _UpperCAmelCase = image_embeddings_clip.repeat_interleave(snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Dict: _UpperCAmelCase = latents.detach().requires_grad_() _UpperCAmelCase = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _UpperCAmelCase = self.scheduler.alphas_cumprod[timestep] _UpperCAmelCase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _UpperCAmelCase = torch.sqrt(snake_case ) _UpperCAmelCase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , snake_case ): _UpperCAmelCase = self.scheduler.sigmas[index] _UpperCAmelCase = latents - sigma * noise_pred else: raise ValueError(f'scheduler type {type(self.scheduler )} not supported' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _UpperCAmelCase = 1 / 0.18215 * sample _UpperCAmelCase = self.vae.decode(snake_case ).sample _UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase = transforms.Resize(self.feature_extractor_size )(snake_case ) _UpperCAmelCase = self.normalize(snake_case ).to(latents.dtype ) _UpperCAmelCase = self.clip_model.get_image_features(snake_case ) _UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case ) _UpperCAmelCase = spherical_dist_loss(snake_case , snake_case ).mean() * clip_guidance_scale _UpperCAmelCase = -torch.autograd.grad(snake_case , snake_case )[0] if isinstance(self.scheduler , snake_case ): _UpperCAmelCase = latents.detach() + grads * (sigma**2) _UpperCAmelCase = noise_pred_original else: _UpperCAmelCase = noise_pred_original - torch.sqrt(snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , snake_case , snake_case , snake_case = None , snake_case = None , snake_case = 512 , snake_case = 512 , snake_case = 0.6 , snake_case = 50 , snake_case = 7.5 , snake_case = 1 , snake_case = 0.0 , snake_case = 100 , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = 0.8 , snake_case = 0.1 , snake_case = 0.1 , ) -> Tuple: if isinstance(snake_case , snake_case ) and len(snake_case ) != batch_size: raise ValueError(f'You have passed {batch_size} batch_size, but only {len(snake_case )} generators.' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' ) if isinstance(snake_case , torch.Generator ) and batch_size > 1: _UpperCAmelCase = [generator] + [None] * (batch_size - 1) _UpperCAmelCase = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] _UpperCAmelCase = [x[0] for x in coca_is_none if x[1]] _UpperCAmelCase = ', '.join(snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(snake_case ): raise ValueError( f'Content prompt is None and CoCa [{coca_is_none_str}] is None.' f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' ) _UpperCAmelCase = self.get_image_description(snake_case ) if style_prompt is None: if len(snake_case ): raise ValueError( f'Style prompt is None and CoCa [{coca_is_none_str}] is None.' f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' 
) _UpperCAmelCase = self.get_image_description(snake_case ) # get prompt text embeddings for content and style _UpperCAmelCase = self.tokenizer( snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , ) _UpperCAmelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _UpperCAmelCase = self.tokenizer( snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , ) _UpperCAmelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _UpperCAmelCase = slerp(snake_case , snake_case , snake_case ) # duplicate text embeddings for each generation per prompt _UpperCAmelCase = text_embeddings.repeat_interleave(snake_case , dim=0 ) # set timesteps _UpperCAmelCase = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _UpperCAmelCase = {} if accepts_offset: _UpperCAmelCase = 1 self.scheduler.set_timesteps(snake_case , **snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(snake_case , snake_case , self.device ) _UpperCAmelCase = timesteps[:1].repeat(snake_case ) # Preprocess image _UpperCAmelCase = preprocess(snake_case , snake_case , snake_case ) _UpperCAmelCase = self.prepare_latents( snake_case , snake_case , snake_case , text_embeddings.dtype , self.device , snake_case ) _UpperCAmelCase = preprocess(snake_case , snake_case , snake_case ) _UpperCAmelCase = self.prepare_latents( snake_case , snake_case , snake_case , text_embeddings.dtype , self.device , snake_case ) _UpperCAmelCase = slerp(snake_case , snake_case , snake_case ) if clip_guidance_scale > 0: _UpperCAmelCase = self.get_clip_image_embeddings(snake_case , snake_case ) _UpperCAmelCase = self.get_clip_image_embeddings(snake_case , snake_case ) _UpperCAmelCase = slerp( snake_case , snake_case , snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _UpperCAmelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _UpperCAmelCase = content_text_input.input_ids.shape[-1] _UpperCAmelCase = self.tokenizer([''] , padding='max_length' , max_length=snake_case , return_tensors='pt' ) _UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _UpperCAmelCase = uncond_embeddings.repeat_interleave(snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_UpperCAmelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _UpperCAmelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _UpperCAmelCase = torch.randn(snake_case , generator=snake_case , device='cpu' , dtype=snake_case ).to( self.device ) else: _UpperCAmelCase = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case ) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) _UpperCAmelCase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta # check if the scheduler accepts generator _UpperCAmelCase = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _UpperCAmelCase = generator with self.progress_bar(total=snake_case ): for i, t in enumerate(snake_case ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: _UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 ) _UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _UpperCAmelCase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _UpperCAmelCase , _UpperCAmelCase = self.cond_fn( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor _UpperCAmelCase = 1 / 0.18215 * latents _UpperCAmelCase = self.vae.decode(snake_case ).sample _UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
24
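Editorial note: the pipeline above mixes prompt and image embeddings with spherical interpolation. A simplified numpy sketch of the same slerp math, omitting the torch round trip and the near-parallel lerp fallback guarded by DOT_THRESHOLD:

import numpy as np

def slerp_demo(t: float, v0: np.ndarray, v1: np.ndarray) -> np.ndarray:
    # Interpolate along the great circle between v0 and v1 rather than the chord.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

v = slerp_demo(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
assert np.allclose(v, [np.sqrt(2) / 2, np.sqrt(2) / 2])  # halfway along the arc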
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
24
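Editorial note: the utilities exercised above dispatch on the input type; with plain numpy arrays they reduce to the familiar numpy ops, which is the reference behavior every framework branch is checked against:

import numpy as np

assert np.squeeze(np.random.randn(1, 4, 1, 5), axis=2).shape == (1, 4, 5)
assert np.transpose(np.random.randn(3, 4, 5), (1, 2, 0)).shape == (4, 5, 3)
assert np.expand_dims(np.random.randn(3, 4), axis=1).shape == (3, 1, 4)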
1
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def UpperCAmelCase ( A : Tuple ): '''simple docstring''' for param in module.parameters(): _UpperCAmelCase = False def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _UpperCAmelCase = 'mps' if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = plt.imshow(A ) fig.axes.get_xaxis().set_visible(A ) fig.axes.get_yaxis().set_visible(A ) plt.show() def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = datetime.now() _UpperCAmelCase = current_time.strftime('%H:%M:%S' ) return timestamp
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
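Editorial note: the solution above leans on Python's arbitrary-precision integers; the only trick is slicing the decimal string of the sum. A toy illustration:

nums = [10**49, 2 * 10**49, 3 * 10**49]
assert str(sum(nums))[:10] == "6000000000"  # sum is 6 * 10**49; first ten digits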
1
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase__ ( A ): '''simple docstring''' @staticmethod @abstractmethod def lowerCamelCase_ ( snake_case ) -> Any: raise NotImplementedError() @abstractmethod def lowerCamelCase_ ( self ) -> Dict: raise NotImplementedError()
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = 'ylacombe/bark-small' _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = 'en_speaker_1' _UpperCAmelCase = 'This is a test string' _UpperCAmelCase = 'speaker_embeddings_path.json' _UpperCAmelCase = 'speaker_embeddings' def lowerCamelCase_ ( self , **snake_case ) -> Optional[int]: return AutoTokenizer.from_pretrained(self.checkpoint , **snake_case ) def lowerCamelCase_ ( self ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=snake_case ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _UpperCAmelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _UpperCAmelCase = 35 _UpperCAmelCase = 2 _UpperCAmelCase = 8 _UpperCAmelCase = { 'semantic_prompt': np.ones(snake_case ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case ) _UpperCAmelCase = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case , np.array([] ) ).tolist() ) # test loading voice preset from npz file _UpperCAmelCase = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(snake_case , **snake_case ) _UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case ) _UpperCAmelCase = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case , np.array([] ) ).tolist() ) # test loading voice preset from the hub _UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=snake_case ) _UpperCAmelCase = processor(text=self.input_string ) _UpperCAmelCase = tokenizer( self.input_string , padding='max_length' , max_length=256 , add_special_tokens=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) _UpperCAmelCase = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(snake_case ) , torch_builtin(snake_case ) ) ) self.assertFalse(torch.allclose(gelu_python(snake_case ) , gelu_new(snake_case ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) _UpperCAmelCase = get_activation('gelu' ) _UpperCAmelCase = get_activation('gelu_10' ) _UpperCAmelCase = torch_builtin(snake_case ) _UpperCAmelCase = geluaa(snake_case ) _UpperCAmelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(snake_case ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCamelCase_ ( self ) -> Tuple: get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(snake_case ): get_activation('bogus' ) with self.assertRaises(snake_case ): get_activation(snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = get_activation('gelu' ) _UpperCAmelCase = 1 _UpperCAmelCase = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(snake_case ): _UpperCAmelCase = acta.a
24
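Editorial note: the clipping test above relies on "gelu_10" behaving like gelu capped at 10, so the two activations agree wherever the cap is not hit. A condensed version of that property check, using the same public get_activation entry point:

import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
y = get_activation("gelu")(x)
y10 = get_activation("gelu_10")(x)
assert y10.max().item() == 10.0
assert torch.allclose(y[y10 < 10.0], y10[y10 < 10.0])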
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
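Editorial note: the configuration class above follows the usual transformers pattern of keyword overrides on top of defaults; BeitConfig is the public name of the class in this row. A small usage sketch:

from transformers import BeitConfig

config = BeitConfig(image_size=384, use_auxiliary_head=False)
assert config.image_size == 384 and config.model_type == "beit"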
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig lowercase = [ '''openmmlab/upernet-convnext-tiny''', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring lowercase = '''UperNetConfig''' class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , snake_case = 0 , snake_case = False , snake_case = 1 , ) -> None: super().__init__() _UpperCAmelCase = nn.Convad( in_channels=snake_case , out_channels=snake_case , kernel_size=snake_case , padding=snake_case , bias=snake_case , dilation=snake_case , ) _UpperCAmelCase = nn.BatchNormad(snake_case ) _UpperCAmelCase = nn.ReLU() def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor: _UpperCAmelCase = self.conv(snake_case ) _UpperCAmelCase = self.batch_norm(snake_case ) _UpperCAmelCase = self.activation(snake_case ) return output class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case ) -> None: super().__init__() _UpperCAmelCase = [ nn.AdaptiveAvgPoolad(snake_case ), UperNetConvModule(snake_case , snake_case , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(snake_case ) , snake_case ) def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor: _UpperCAmelCase = input for layer in self.layers: _UpperCAmelCase = layer(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , snake_case ) -> None: super().__init__() _UpperCAmelCase = pool_scales _UpperCAmelCase = align_corners _UpperCAmelCase = in_channels _UpperCAmelCase = channels _UpperCAmelCase = [] for i, pool_scale in enumerate(snake_case ): _UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=snake_case , in_channels=snake_case , channels=snake_case ) self.blocks.append(snake_case ) self.add_module(str(snake_case ) , snake_case ) def lowerCamelCase_ ( self , snake_case ) -> List[torch.Tensor]: _UpperCAmelCase = [] for ppm in self.blocks: _UpperCAmelCase = ppm(snake_case ) _UpperCAmelCase = nn.functional.interpolate( snake_case , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners ) ppm_outs.append(snake_case ) return ppm_outs class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case ) -> List[str]: super().__init__() _UpperCAmelCase = config _UpperCAmelCase = config.pool_scales # e.g. 
(1, 2, 3, 6) _UpperCAmelCase = in_channels _UpperCAmelCase = config.hidden_size _UpperCAmelCase = False _UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module _UpperCAmelCase = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) _UpperCAmelCase = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module _UpperCAmelCase = nn.ModuleList() _UpperCAmelCase = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer _UpperCAmelCase = UperNetConvModule(snake_case , self.channels , kernel_size=1 ) _UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(snake_case ) self.fpn_convs.append(snake_case ) _UpperCAmelCase = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def lowerCamelCase_ ( self ) -> Optional[int]: self.apply(self._init_weights ) def lowerCamelCase_ ( self , snake_case ) -> Tuple: if isinstance(snake_case , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def lowerCamelCase_ ( self , snake_case ) -> Optional[int]: _UpperCAmelCase = inputs[-1] _UpperCAmelCase = [x] psp_outs.extend(self.psp_modules(snake_case ) ) _UpperCAmelCase = torch.cat(snake_case , dim=1 ) _UpperCAmelCase = self.bottleneck(snake_case ) return output def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor: # build laterals _UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(snake_case ) ) # build top-down path _UpperCAmelCase = len(snake_case ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _UpperCAmelCase = laterals[i - 1].shape[2:] _UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=snake_case , mode='bilinear' , align_corners=self.align_corners ) # build outputs _UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _UpperCAmelCase = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners ) _UpperCAmelCase = torch.cat(snake_case , dim=1 ) _UpperCAmelCase = self.fpn_bottleneck(snake_case ) _UpperCAmelCase = self.classifier(snake_case ) return output class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case = 2 , snake_case = 3 , snake_case = 1 ) -> None: super().__init__() _UpperCAmelCase = config _UpperCAmelCase = config.auxiliary_in_channels _UpperCAmelCase = config.auxiliary_channels _UpperCAmelCase = config.auxiliary_num_convs _UpperCAmelCase = config.auxiliary_concat_input _UpperCAmelCase = in_index _UpperCAmelCase = (kernel_size // 2) * dilation _UpperCAmelCase = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=snake_case , padding=snake_case , dilation=snake_case ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=snake_case , padding=snake_case , dilation=snake_case ) ) if self.num_convs == 0: _UpperCAmelCase = nn.Identity() else: _UpperCAmelCase = nn.Sequential(*snake_case ) if self.concat_input: 
_UpperCAmelCase = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=snake_case , padding=kernel_size // 2 ) _UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def lowerCamelCase_ ( self ) -> Dict: self.apply(self._init_weights ) def lowerCamelCase_ ( self , snake_case ) -> Union[str, Any]: if isinstance(snake_case , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor: # just take the relevant feature maps _UpperCAmelCase = encoder_hidden_states[self.in_index] _UpperCAmelCase = self.convs(snake_case ) if self.concat_input: _UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) _UpperCAmelCase = self.classifier(snake_case ) return output class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = UperNetConfig _UpperCAmelCase = '''pixel_values''' _UpperCAmelCase = True def lowerCamelCase_ ( self , snake_case ) -> List[str]: if isinstance(snake_case , snake_case ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def lowerCamelCase_ ( self ) -> Optional[int]: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def lowerCamelCase_ ( self , snake_case , snake_case=False ) -> Any: if isinstance(snake_case , snake_case ): _UpperCAmelCase = value lowercase = r''' Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowercase = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''', A, ) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case ) -> Dict: super().__init__(snake_case ) _UpperCAmelCase = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) _UpperCAmelCase = UperNetHead(snake_case , in_channels=self.backbone.channels ) _UpperCAmelCase = UperNetFCNHead(snake_case ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def lowerCamelCase_ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ) -> Union[tuple, SemanticSegmenterOutput]: _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions _UpperCAmelCase = self.backbone.forward_with_filtered_kwargs( snake_case , output_hidden_states=snake_case , output_attentions=snake_case ) _UpperCAmelCase = outputs.feature_maps _UpperCAmelCase = self.decode_head(snake_case ) _UpperCAmelCase = nn.functional.interpolate(snake_case , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=snake_case ) _UpperCAmelCase = None if self.auxiliary_head is not None: _UpperCAmelCase = self.auxiliary_head(snake_case ) _UpperCAmelCase = nn.functional.interpolate( snake_case , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=snake_case ) _UpperCAmelCase = None if labels is not None: if self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one' ) else: # compute weighted loss _UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) _UpperCAmelCase = loss_fct(snake_case , snake_case ) _UpperCAmelCase = loss_fct(snake_case , snake_case ) _UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: _UpperCAmelCase = (logits,) + outputs[1:] else: _UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
24
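Editorial note: a sketch of one pyramid-pooling branch as wired above: adaptively pool to a small grid, project with a 1x1 conv, then upsample back to the input resolution (the batch norm and activation of UperNetConvModule are omitted here, and ppm_branch is an illustrative name):

import torch
from torch import nn

def ppm_branch(x: torch.Tensor, scale: int, proj: nn.Conv2d) -> torch.Tensor:
    pooled = nn.functional.adaptive_avg_pool2d(x, scale)
    return nn.functional.interpolate(
        proj(pooled), size=x.shape[2:], mode="bilinear", align_corners=False
    )

x = torch.randn(1, 16, 32, 32)
out = ppm_branch(x, scale=2, proj=nn.Conv2d(16, 8, kernel_size=1))
assert out.shape == (1, 8, 32, 32)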
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
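Editorial note: a toy version of the counting loop above: accumulate token-id occurrences across sequences, then expand the Counter into a dense per-id list of length vocab_size:

from collections import Counter

data = [[5, 7, 5], [7, 7, 2]]
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 8  # toy vocab_size
for k, v in counter.items():
    counts[k] = v
assert counts == [0, 0, 1, 0, 0, 2, 0, 3]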
1
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers lowercase = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
24
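Editorial note: a worked check of the divisibility chain tested above, using 1406357289 (the example given in the Project Euler 43 statement). Each 3-digit window from d2..d4 through d8..d10 is divisible by 2, 3, 5, 7, 11, 13, 17 in turn:

windows = [406, 63, 635, 357, 572, 728, 289]
divisors = [2, 3, 5, 7, 11, 13, 17]
assert all(w % d == 0 for w, d in zip(windows, divisors))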
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''gpt_neox''' def __init__( self , snake_case=50432 , snake_case=6144 , snake_case=44 , snake_case=64 , snake_case=24576 , snake_case="gelu" , snake_case=0.25 , snake_case=10000 , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case=2048 , snake_case=0.02 , snake_case=1E-5 , snake_case=True , snake_case=0 , snake_case=2 , snake_case=False , snake_case=True , snake_case=None , **snake_case , ) -> Union[str, Any]: super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = rotary_pct _UpperCAmelCase = rotary_emb_base _UpperCAmelCase = attention_dropout _UpperCAmelCase = hidden_dropout _UpperCAmelCase = classifier_dropout _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = tie_word_embeddings _UpperCAmelCase = use_parallel_residual _UpperCAmelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( 'The hidden size is not divisble by the number of attention heads! Make sure to update them!' ) def lowerCamelCase_ ( self ) -> Dict: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , snake_case ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'got {self.rope_scaling}' ) _UpperCAmelCase = self.rope_scaling.get('type' , snake_case ) _UpperCAmelCase = self.rope_scaling.get('factor' , snake_case ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(snake_case , snake_case ) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowercase = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowercase = logging.get_logger(__name__) def UpperCAmelCase ( A : int , A : str , A : Tuple ): '''simple docstring''' _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(A , config=A ) _UpperCAmelCase = downstream_dict['projector.weight'] _UpperCAmelCase = downstream_dict['projector.bias'] _UpperCAmelCase = downstream_dict['model.post_net.linear.weight'] _UpperCAmelCase = downstream_dict['model.post_net.linear.bias'] return model def UpperCAmelCase ( A : str , A : int , A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(A , config=A ) _UpperCAmelCase = downstream_dict['model.linear.weight'] _UpperCAmelCase = downstream_dict['model.linear.bias'] return model def UpperCAmelCase ( A : int , A : Optional[Any] , A : Dict ): '''simple docstring''' _UpperCAmelCase = WavaVecaForXVector.from_pretrained(A , config=A ) _UpperCAmelCase = downstream_dict['connector.weight'] _UpperCAmelCase = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] _UpperCAmelCase = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] _UpperCAmelCase = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] _UpperCAmelCase = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] _UpperCAmelCase = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] _UpperCAmelCase = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] _UpperCAmelCase = downstream_dict['objective.W'] return model @torch.no_grad() def UpperCAmelCase ( A : Optional[int] , A : Optional[Any] , A : str , A : Dict ): '''simple docstring''' _UpperCAmelCase = torch.load(A , map_location='cpu' ) _UpperCAmelCase = checkpoint['Downstream'] _UpperCAmelCase = WavaVecaConfig.from_pretrained(A ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( A , return_attention_mask=A , do_normalize=A ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): _UpperCAmelCase = convert_classification(A , A , A ) elif arch.endswith('ForAudioFrameClassification' ): _UpperCAmelCase = convert_diarization(A , A , A ) elif arch.endswith('ForXVector' ): _UpperCAmelCase = convert_xvector(A , A , A ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(A ) hf_model.save_pretrained(A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowercase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, 
args.model_dump_path)
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case=None , **snake_case ) -> List[Any]: warnings.warn( '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ' 'instead.' , snake_case , ) super().__init__(args=snake_case , **snake_case )
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ) -> Tuple: _UpperCAmelCase = path_or_paths _UpperCAmelCase = split if split or isinstance(snake_case , snake_case ) else 'train' _UpperCAmelCase = features _UpperCAmelCase = cache_dir _UpperCAmelCase = keep_in_memory _UpperCAmelCase = streaming _UpperCAmelCase = num_proc _UpperCAmelCase = kwargs @abstractmethod def lowerCamelCase_ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: pass class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ) -> Tuple: _UpperCAmelCase = features _UpperCAmelCase = cache_dir _UpperCAmelCase = keep_in_memory _UpperCAmelCase = streaming _UpperCAmelCase = num_proc _UpperCAmelCase = kwargs @abstractmethod def lowerCamelCase_ ( self ) -> Union[Dataset, IterableDataset]: pass
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case 
, snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME lowercase = ['''small''', '''medium''', '''large'''] lowercase = '''lm_head.decoder.weight''' lowercase = '''lm_head.weight''' def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = torch.load(A ) _UpperCAmelCase = d.pop(A ) os.makedirs(A , exist_ok=A ) torch.save(A , os.path.join(A , A ) ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) lowercase = parser.parse_args() for MODEL in DIALOGPT_MODELS: lowercase = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''') lowercase = F'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1