"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
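
# Behavior sketch (grounded in the comment above): legacy imports such as
#
#     from transformers.file_utils import ModelOutput, is_torch_available
#
# keep resolving because every name is simply re-exported here from `transformers.utils`.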
def min_path_sum(grid: list) -> int:
    """Return the lowest possible sum along a top-left to bottom-right path.

    Only moves to the right and downward are allowed; the grid is updated in place.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the cheapest reachable path sum into each cell of ``current_row``."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
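
# Hedged usage sketch (example grid chosen for illustration; the function name
# `min_path_sum` is a reconstruction, as the original name was obfuscated):
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7
#
# Note that the grid is modified in place: after the call, each cell holds the
# cheapest path sum from the top-left corner to that cell.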
"""Compute a price with a tax rate applied."""


def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return ``price`` increased by ``tax_rate``, i.e. price * (1 + tax_rate)."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
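
# Worked example for the demo above: price_plus_tax(100, 0.25) computes
# 100 * (1 + 0.25) = 125.0, and price_plus_tax(125.50, 0.05) = 131.775.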
"""Lazy import structure for the ConditionalDETR model."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict =["ConditionalDetrFeatureExtractor"]
_lowercase : Optional[int] =["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
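
# Behavior sketch (standard transformers lazy-import pattern): at import time the module
# is swapped for a _LazyModule, so for example
#
#     from transformers.models.conditional_detr import ConditionalDetrModel
#
# defers the heavy torch-backed import until the attribute is first accessed.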
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        # Make sure the backend normalizer matches the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
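
# Hedged usage sketch (assumes the "google/mobilebert-uncased" checkpoint from the map above):
#
#     tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     enc = tok("hello world")
#     # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
#     # mirroring build_inputs_with_special_tokens above.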
"""Tests for the zero-shot classification pipeline."""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )

        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
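
# Hedged usage sketch of the pipeline these tests exercise (downloads a checkpoint from the Hub):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])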
"""Tests for the SpeechT5 tokenizer."""
import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
            # fmt: on
        )
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
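
# Hedged usage sketch (checkpoint name taken from the integration test above):
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     tok.tokenize("This is a test")  # character-level pieces prefixed with the SentencePiece underline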
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
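
# Minimal sketch of exercising the model under test by hand (assumes `diffusers` is
# installed and the dummy checkpoint referenced above is reachable):
#
#     vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
#     sample = torch.randn(1, 3, 32, 32)
#     latents = vae.encode(sample).latent_dist.sample()
#     reconstruction = vae.decode(latents).sample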
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = 0
A__ = [0]
A__ = [0]
A__ = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
A__ = [60]
A__ = [10]
A__ = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = 3
A__ = [1, 2, 3]
A__ = [3, 2, 1]
A__ = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 5 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = 50
A__ = [60, 100, 120]
A__ = [10, 20, 30]
A__ = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 220 )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from collections.abc import Callable
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = a
A__ = b
if function(lowerCAmelCase__ ) == 0: # one of the a or b is a root for the function
return a
elif function(lowerCAmelCase__ ) == 0:
return b
elif (
function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
A__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(lowerCAmelCase__ ) == 0:
return mid
elif function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) < 0:
A__ = mid
else:
A__ = mid
A__ = start + (end - start) / 2.0
return mid
def __lowerCamelCase ( lowerCAmelCase__ ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
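
# Sanity check for the demo above: the real root of f(x) = x**3 - 2x - 5 lies near
# x = 2.0945515, so bisection(f, 1, 1000) should print a value very close to that.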
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
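
# Hedged construction sketch (parameter names as defined in __init__ above):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     assert config.hidden_act == "relu6"  # the default activation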
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
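# How the lazy module behaves (a sketch, assuming this file is packaged as
# ``transformers.models.beit``): attribute access triggers the real import.
#
#     from transformers.models import beit
#     beit.BeitConfig   # imports configuration_beit on first access
#     beit.BeitModel    # imports modeling_beit only if torch is available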
| 302 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
'''simple docstring'''
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        """simple docstring"""
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
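# A quick illustration (a sketch, not part of the original class): parameters
# that pandas has deprecated, or only gained in newer versions, are dropped
# from the kwargs when they still hold their default value, so a fresh config
# passes only arguments the installed pandas actually accepts.
#
#     CsvConfig(sep=";").pd_read_csv_kwargs["sep"]   # ';'
#     "mangle_dupe_cols" in CsvConfig().pd_read_csv_kwargs   # False on recent pandas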
class Csv(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise
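# End-to-end usage sketch (this module is the packaged ``csv`` builder in
# ``datasets``, so it is normally reached through load_dataset):
#
#     import datasets
#     ds = datasets.load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#     print(ds["train"].features)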
| 108 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
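# Worked example (a sketch; Pollard's rho is probabilistic, so the factor it
# returns depends on the seed and step):
#
#     pollard_rho(8051)   # 8051 == 83 * 97, so this is 83, 97, or None on an unlucky run
#     pollard_rho(17)     # 17 is prime, so this is always None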
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 444 | 0 |
'''simple docstring'''
from __future__ import annotations
graph: dict[str, list[str]] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """simple docstring"""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    """simple docstring"""

    model_type: str = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1E-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 271 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
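# A standalone sketch of what the slow test above exercises (assumes flax and
# the hub checkpoint are available):
#
#     import numpy as np
#     from transformers import FlaxBertModel
#     model = FlaxBertModel.from_pretrained("bert-base-cased")
#     out = model(np.ones((1, 1), dtype="i4"))
#     print(out.last_hidden_state.shape)   # (1, 1, 768)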
| 271 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
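# Worked check (a sketch): x**2 - 5*x + 2 = 0 has roots (5 ± sqrt(17)) / 2, so
# starting at 0.4 the iteration converges to (5 - sqrt(17)) / 2:
#
#     newton_raphson('x**2 - 5*x + 2', 0.4)   # ≈ 0.4384471871911697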
| 447 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """simple docstring"""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
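# Example (traced through the branches above): replace_key("prior.x_out.weight")
# returns "prior.fc_proj_out.weight", and
# replace_key("vqvae.bottleneck.level_blocks.0.k") returns
# "vqvae.bottleneck.level_blocks.0.codebook".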
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """simple docstring"""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
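# Typical invocation (a sketch; the script filename is whatever this module is
# saved as, e.g. convert_jukebox.py):
#
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path jukebox-1b-lyrics-converted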
| 447 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<s>')
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip('Skip this test while all models are still to be uploaded.')
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, 'This is a test')
@slow
    def test_tokenizer_integration(self):
# fmt: off
A = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id('ar'), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id('en'), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id('ro'), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id('mr'), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab['<unk>'], 3)
        self.assertIn(self.tokenizer.get_lang_token('en'), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                'input_ids': [[128_022, 58, 4_183, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 128_006,
            }, )
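# A quick translation-encoding sketch using the same checkpoint (an
# illustration, not a test; assumes the hub checkpoint is available):
#
#     from transformers import M2M100Tokenizer
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     enc = tok("Hello world", return_tensors="pt")
#     # enc.input_ids starts with the en language code and ends with </s> (id 2)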
| 106 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # This check we did call the fake head request
        mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2')
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        trie.data
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
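# What Trie.split gives a tokenizer (a sketch): added tokens are matched
# greedily and the remaining spans are left for the underlying model.
#
#     trie = Trie()
#     trie.add("[SEP]")
#     trie.split("hello [SEP] world")   # ['hello ', '[SEP]', ' world']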
| 106 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_UpperCAmelCase = "bart"
_UpperCAmelCase = True
@st.cache(allow_output_mutation=True)
def load_models():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """simple docstring"""
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _lowerCamelCase ( _a , _a="wiki40b" , _a="dense" , _a=1_0 ):
"""simple docstring"""
if source == "none":
_lowerCamelCase , _lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
_lowerCamelCase , _lowerCamelCase = query_qa_dense_index(
_a , _a , _a , _a , _a , _a )
else:
_lowerCamelCase , _lowerCamelCase = query_es_index(
_a , _a , index_name='''english_wiki40b_snippets_100w''' , n_results=_a , )
_lowerCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_lowerCamelCase = '''question: {} context: {}'''.format(_a , _a )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    # `support_list` here is the module-level list built in the main block below.
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
_UpperCAmelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
_UpperCAmelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
_UpperCAmelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
_UpperCAmelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
_UpperCAmelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
_UpperCAmelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
_UpperCAmelCase = "wiki40b"
_UpperCAmelCase = "dense"
_UpperCAmelCase = "beam"
_UpperCAmelCase = 2
_UpperCAmelCase = 64
_UpperCAmelCase = 256
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = st.sidebar.checkbox("Generation options")
if generate_options:
_UpperCAmelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
_UpperCAmelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
_UpperCAmelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
_UpperCAmelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
_UpperCAmelCase = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
_UpperCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase = support_list[:10]
_UpperCAmelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
_UpperCAmelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
_UpperCAmelCase = res[1].strip()
if sec_titles == "":
_UpperCAmelCase = "[{}]({})".format(res[0], wiki_url)
else:
_UpperCAmelCase = sec_titles.split(" & ")
_UpperCAmelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
_UpperCAmelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 297 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
"""simple docstring"""
    def __init__(self, degree, coefficients):
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
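
# Usage sketch (added for illustration, not part of the original module); the
# expected outputs follow directly from the arithmetic implemented above.
if __name__ == "__main__":
    p = Polynomial(2, [0, 0, 1])  # x^2
    q = Polynomial(1, [1, 1])  # x + 1
    print(p + q)  # 1x^2 + 1x + 1
    print(p * q)  # 1x^3 + 1x^2
    print(p.evaluate(3))  # 9
    print(p.derivative())  # 2x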
| 297 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _a ( UpperCamelCase_ : "RagExampleArguments" , UpperCamelCase_ : "ProcessingArguments" , UpperCamelCase_ : "IndexHnswArguments" , ) -> int:
"""simple docstring"""
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ = dataset.map(UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCamelCase_ )
lowerCAmelCase__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ = dataset.map(
partial(UpperCamelCase_ , ctx_encoder=UpperCamelCase_ , ctx_tokenizer=UpperCamelCase_ ) , batched=UpperCamelCase_ , batch_size=processing_args.batch_size , features=UpperCamelCase_ , )
# And finally save your dataset
lowerCAmelCase__ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(UpperCamelCase_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=UpperCamelCase_ )
# And save the index
lowerCAmelCase__ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(UpperCamelCase_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
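# Example invocation (illustrative, not from the original file; the flag names
# are derived from the dataclass fields above via HfArgumentParser):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/output_dir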
| 339 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        geluaa = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
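
# Quick usage sketch (my addition): get_activation returns a ready-to-call module.
# For instance, silu(1.0) = 1 / (1 + e^-1) ~= 0.7311:
#   act = get_activation("silu")
#   act(torch.tensor([1.0]))  # -> tensor([0.7311])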
| 339 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : Any =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # A GIT model without a prompt sets `input_ids` to None in `preprocess`; in
        # batched mode that becomes a list of Nones, which `generate` cannot handle.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
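
# Usage sketch (added for illustration): this class backs the "image-to-text"
# task in the pipeline factory; the checkpoint below is only an example.
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': '...'}]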
| 574 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_lowercase : Any ="Usage of script: script_name <size_of_canvas:int>"
_lowercase : Optional[Any] =[0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Run one generation of Conway's Game of Life over the whole canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
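
# Sanity check (my addition): a live centre cell with two live neighbours
# survives — `alive` counts 3 including the centre, minus 1 for the focus point.
assert __judge_point(
    True, [[True, True, False], [False, True, False], [False, False, False]]
) is True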
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 574 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
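
# Usage sketch (my addition): the example runner instantiates these task
# classes by name, e.g.:
#   token_classification_task = NER(label_idx=-1)
#   labels = token_classification_task.get_labels(path=None)  # default CoNLL label set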
| 535 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 535 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the original name was obfuscated

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 261 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__UpperCAmelCase ="""base_with_context"""
def load_notes_encoder(weights, model):
    # NOTE: the left-hand attribute paths assume the T5-style encoder blocks
    # used by SpectrogramNotesEncoder; they were dropped in the garbled source.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # NOTE: attribute paths assume the same T5-style encoder blocks as above.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # NOTE: attribute paths assume the FiLM decoder block layout of
    # TaFilmDecoder; they were dropped in the garbled source.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
| 261 | 1 |
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    """Sum all primes below ``n`` using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 566 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
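
# Minimal usage sketch (added for illustration; `train_loop` and its body are
# hypothetical, only the decorator comes from this module):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train_loop(batch_size):
#       ...  # build dataloaders with `batch_size`; a CUDA OOM here retries at 64, 32, ...
#
#   train_loop()  # note: the decorator supplies the first argument itself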
| 566 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # convert the mean per-token loss back into a total sequence log-likelihood
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 721 |
import unittest
from knapsack import knapsack as k
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 527 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = version.parse("""1.11""" )
@property
def _lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self : List[Any] ) -> float:
"""simple docstring"""
return 1e-4
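# Usage sketch (illustrative) of the two classes above: the configuration holds
# the model hyperparameters, and the ONNX config describes the export interface.
#
#   config = BeitConfig(image_size=224, patch_size=16, num_channels=3)
#   onnx_config = BeitOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict with a 4-D "pixel_values" axis map
#   onnx_config.atol_for_validation  # 1e-4, the tolerance returned above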
| 393 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method.

    Returns the approximated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
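# Worked example (illustrative): interpolating f(x) = x**2 through four samples
# and evaluating at 2.5; the first element of the result is the interpolated
# value, the second is Neville's full table of intermediate estimates.
#
#   neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0]  # -> 6.25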
| 393 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """A fixed-language translation feature: one string per language."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """A translation feature where each example may cover a different subset of languages."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
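# Usage sketch (illustrative) for the variable-language feature above; note the
# sort is over (language, text) tuples, so translations are ordered per language:
#
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "la chatte", "le chat")}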
| 661 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
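# Usage sketch (illustrative): each deprecation is a (name, removal_version,
# message) tuple, and `take_from` is typically the caller's leftover kwargs so
# the old value can still be recovered while the warning fires.
#
#   def __call__(self, *, scale=None, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Pass `scale` via the config instead.", take_from=kwargs)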
| 661 | 1 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is given as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
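# Worked example (illustrative): the force between two 1 C charges one metre
# apart; exactly one of the four quantities must be passed as 0.
#
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#   # -> {"force": 8988000000.0}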
| 250 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = CanineTokenizer
__UpperCAmelCase : int = False
def _UpperCamelCase ( self ):
super().setUp()
lowerCamelCase_ : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def _UpperCamelCase ( self , **a_ ):
lowerCamelCase_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
lowerCamelCase_ : Dict = 1024
return tokenizer
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.canine_tokenizer
lowerCamelCase_ : str = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
lowerCamelCase_ : Dict = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ : List[Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
lowerCamelCase_ : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a_ , a_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.canine_tokenizer
lowerCamelCase_ : Tuple = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
lowerCamelCase_ : Union[str, Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , a_ )
self.assertIn("attention_mask" , a_ )
self.assertIn("token_type_ids" , a_ )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.canine_tokenizer
lowerCamelCase_ : Tuple = [
"What's the weater?",
"It's about 25 degrees.",
]
lowerCamelCase_ : Optional[Any] = tokenizer(
text_target=a_ , max_length=32 , padding="max_length" , truncation=a_ , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCamelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : List[Any] = tempfile.mkdtemp()
lowerCamelCase_ : Tuple = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ : List[str] = chr(0Xe007 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Any = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : Any = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ : int = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ ,lowerCamelCase_ : str = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Tuple = 0Xe005
lowerCamelCase_ : Dict = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCamelCase_ : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCamelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCamelCase_ : Optional[int] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Optional[int] = chr(0Xe005 )
lowerCamelCase_ : str = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ : List[str] = 0Xe006
lowerCamelCase_ : Any = chr(a_ )
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : List[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : int = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Any = 0Xe006
lowerCamelCase_ : List[Any] = chr(a_ )
lowerCamelCase_ : Any = [new_token_a]
lowerCamelCase_ : Optional[Any] = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ : str = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ : Optional[int] = 0Xe007
lowerCamelCase_ : List[str] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ : int = [AddedToken(a_ , lstrip=a_ )]
lowerCamelCase_ : Dict = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Union[str, Any] = "hello world"
if self.space_between_special_tokens:
lowerCamelCase_ : int = "[CLS] hello world [SEP]"
else:
lowerCamelCase_ : int = input
lowerCamelCase_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCamelCase_ : Optional[int] = "a"
lowerCamelCase_ : Dict = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCamelCase_ : Optional[int] = 0Xe006
lowerCamelCase_ : List[str] = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
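    # Background note (illustrative): CANINE tokenizes at the Unicode codepoint
    # level, so ids are just ord() values and special tokens live in the
    # private-use area, e.g. [CLS] = 0xE000 = 57344 and [SEP] = 0xE001 = 57345
    # in the expected encoding near the top of this test file:
    #
    #   [ord(c) for c in "Life"]  # -> [76, 105, 102, 101]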
| 250 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=False,
        remove_columns=train_test_validation["train"].column_names,
    )

    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch",
        save_strategy="epoch", logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model, args=training_args,
        train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
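# Illustrative sketch of the label mapping used by tokenize() above: ClassLabel
# assigns every distinct complexity string an integer id, and str2int() does
# the lookup during preprocessing.
#
#   labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
#   labels.str2int("linear")  # -> 1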
| 235 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler problem 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
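# Worked example (illustrative): 47 is not counted because one reverse-and-add
# step already yields a palindrome, while each candidate gets 50 such steps:
#
#   sum_reverse(47)     # -> 47 + 74 = 121
#   is_palindrome(121)  # -> True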
| 235 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24,
        attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None,
        window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0,
        attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute the largest divisor of the sequence length below window_size, for ONNX export."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
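# Illustrative note: the default attention_types value [[["global", "local"], 12]]
# expands via expand_attention_types_params() above into a per-layer pattern of
# length 24, matching num_layers:
#
#   ["global", "local", "global", "local", ...]  # the pair repeated 12 times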
| 427 |
'''simple docstring'''
def to_upper_case(word: str) -> str:
    """Convert the ASCII lowercase letters in `word` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 427 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowerCAmelCase__ :
def __init__( self , a ) -> Dict:
'''simple docstring'''
_UpperCamelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_UpperCamelCase = len(a ) - 1
def A_ ( self , a ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_UpperCamelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a ) , 5 ) == 1
return output_values
def A_ ( self , a ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_UpperCamelCase = self.basis_function(a )
_UpperCamelCase = 0.0
_UpperCamelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def A_ ( self , a = 0.01 ) -> Any:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
_UpperCamelCase = [] # x coordinates of points to plot
_UpperCamelCase = [] # y coordinates of points to plot
_UpperCamelCase = 0.0
while t <= 1:
_UpperCamelCase = self.bezier_curve_function(a )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_UpperCamelCase = [i[0] for i in self.list_of_points]
_UpperCamelCase = [i[1] for i in self.list_of_points]
plt.plot(
a , a , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(a , a , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 202 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` near `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 202 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=1) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = tokenizer
_lowercase : str = dataset
_lowercase : str = len(__A) if n_tasks is None else n_tasks
_lowercase : str = n_copies
def __iter__( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : str = []
for task in range(self.n_tasks):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
_lowercase : Optional[int] = self.tokenizer(__A, padding=__A, return_tensors='pt')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Tuple = start_length
_lowercase : Tuple = eof_strings
_lowercase : int = tokenizer
def __call__( self, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
_lowercase : Any = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(__A)
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : int = re.split('(%s)' % '|'.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=20 , **lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
_lowercase : Optional[Any] = batch['ids'].shape[-1]
_lowercase : List[str] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
_lowercase : Optional[Any] = batch['task_id'].repeat(_lowercase )
_lowercase : Tuple = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
_lowercase , _lowercase : Optional[Any] = accelerator.gather((generated_tokens, generated_tasks) )
_lowercase : int = generated_tokens.cpu().numpy()
_lowercase : List[str] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
_lowercase : Any = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_lowercase : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Optional[Any] = HfArgumentParser(_lowercase )
_lowercase : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_lowercase : Optional[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_lowercase : int = 'false'
if args.num_workers is None:
_lowercase : Any = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_lowercase : Optional[int] = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
_lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
_lowercase : str = tokenizer.eos_token
_lowercase : Optional[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_lowercase : Tuple = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
_lowercase : Any = load_dataset('openai_humaneval' )
_lowercase : List[Any] = load_metric('code_eval' )
_lowercase : Any = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
_lowercase : List[str] = args.n_samples // args.batch_size
_lowercase : Optional[int] = TokenizedDataset(_lowercase , human_eval['test'] , n_copies=_lowercase , n_tasks=_lowercase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_lowercase : Any = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_lowercase : str = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
_lowercase , _lowercase : Any = accelerator.prepare(_lowercase , _lowercase )
_lowercase : Dict = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
_lowercase : Optional[Any] = []
for task in tqdm(range(_lowercase ) ):
_lowercase : str = human_eval['test'][task]['test']
_lowercase : List[Any] = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
_lowercase , _lowercase : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
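# Illustrative sketch of remove_last_block() above: decoding stops once any EOF
# string appears, and the split keeps everything before the final stop marker so
# the completed function body survives intact.
#
#   remove_last_block("    return x\n\nclass Foo:")  # -> "    return x\n"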
| 89 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__A = 2
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , *, # begin keyword-only arguments
__A: Any="<s>" , __A: List[str]="<pad>" , __A: Optional[Any]="</s>" , __A: Dict="<unk>" , __A: Any=None , ) -> Tuple:
_A ,_A ,_A ,_A = bos, unk, pad, eos
_A = []
_A = []
_A = {}
_A = self.add_symbol(__A )
_A = self.add_symbol(__A )
_A = self.add_symbol(__A )
_A = self.add_symbol(__A )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__A )
_A = len(self.symbols )
def __eq__( self: Any , __A: Any ) -> Optional[Any]:
return self.indices == other.indices
def __getitem__( self: Tuple , __A: Optional[int] ) -> int:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self: Optional[Any] ) -> Optional[Any]:
return len(self.symbols )
def __contains__( self: Dict , __A: List[str] ) -> Union[str, Any]:
return sym in self.indices
@classmethod
def __A ( cls: Tuple , __A: Optional[Any] ) -> Optional[Any]:
_A = cls()
d.add_from_file(__A )
return d
def __A ( self: List[Any] , __A: List[str] , __A: List[Any]=1 , __A: List[Any]=False ) -> Optional[Any]:
if word in self.indices and not overwrite:
_A = self.indices[word]
_A = self.count[idx] + n
return idx
else:
_A = len(self.symbols )
_A = idx
self.symbols.append(__A )
self.count.append(__A )
return idx
def __A ( self: Optional[Any] , __A: Optional[int] ) -> str:
return 0
def __A ( self: List[str] , __A: Optional[Any] ) -> List[Any]:
if isinstance(__A , __A ):
try:
with open(__A , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(__A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(__A ) )
return
_A = f.readlines()
_A = self._load_meta(__A )
for line in lines[indices_start_line:]:
try:
_A ,_A = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
_A = True
_A ,_A = line.rsplit(''' ''' , 1 )
else:
_A = False
_A = int(__A )
_A = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(__A ) )
self.add_symbol(__A , n=__A , overwrite=__A )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def __A ( _lowercase ):
'''simple docstring'''
_A = dict((re.sub(R'''@@$''' , '''''' , _lowercase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , _lowercase ), v) for k, v in d.items() )
_A = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
_A = d[k] # restore
return da
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
if not os.path.exists(_lowercase ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(_lowercase , exist_ok=_lowercase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_A = os.path.join(_lowercase , '''checkpoint.pt''' )
if not os.path.isfile(_lowercase ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
_A = torch.load(_lowercase , map_location='''cpu''' )
_A = chkpt['''cfg''']['''model''']
# dicts
_A = os.path.join(_lowercase , '''dict.txt''' )
if not os.path.isfile(_lowercase ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
_A = Dictionary.load(_lowercase )
_A = rewrite_dict_keys(src_dict.indices )
_A = len(_lowercase )
_A = os.path.join(_lowercase , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# merges_file (bpecodes)
_A = os.path.join(_lowercase , '''bpecodes''' )
if not os.path.isfile(_lowercase ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
_A = os.path.join(_lowercase , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(_lowercase , _lowercase )
# model config
_A = os.path.join(_lowercase , '''config.json''' )
_A = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# tokenizer config
_A = os.path.join(_lowercase , _lowercase )
_A = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 10_24,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# model
_A = chkpt['''model''']
# remove unneeded keys
_A = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(_lowercase , _lowercase )
_A = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
_A = model_state_dict.pop(_lowercase )
else:
_A = model_state_dict.pop(_lowercase )
_A = BioGptConfig.from_pretrained(_lowercase )
_A = BioGptForCausalLM(_lowercase )
# check that it loads ok
model_new.load_state_dict(_lowercase )
# save
_A = os.path.join(_lowercase , _lowercase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_lowercase , _lowercase )
print('''Conversion is done!''' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__A = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
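# Illustrative sketch of rewrite_dict_keys() above: fairseq BPE continuation
# markers ("@@") are stripped and word-final tokens gain a "</w>" suffix:
#
#   rewrite_dict_keys({"bio@@": 7, "gpt": 8})  # -> {"bio": 7, "gpt</w>": 8}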
| 484 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
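# Worked example (illustrative) of the merge table above: with the ranked merges
# ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"], the word "readapt" is merged
# step by step into re@@ + adapt, which is exactly what the expected token
# sequence in test_full_tokenizer asserts.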
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        """Apply val_transforms across a batch."""
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["""train"""].features[data_args.label_column_name].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
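# `random_subsample` is used in `train_transforms` above but imported from the
# example's local module, which is not shown here. A minimal sketch consistent
# with the call site (an assumption, not the original helper): crop a random
# window of at most `max_length` seconds from a 1-D waveform.
def random_subsample_sketch(wav , max_length , sample_rate = 16_000 ):
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav  # clip already short enough, keep it whole
    random_offset = np.random.randint(0 , len(wav ) - sample_length )
    return wav[random_offset : random_offset + sample_length]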
| 653 | 0 |
def _a ( list_data : list , length : int = 0 ):
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else _a(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
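# A quick sanity check for the recursive bubble sort above (my addition;
# assumes plain integer inputs). The list is sorted in place and returned,
# and the `swapped` early exit makes a pass over an already-sorted list O(n).
if __name__ == "__main__":
    assert _a([5, 1, 4, 2, 8] ) == [1, 2, 4, 5, 8]
    assert _a([-3, 0, -3] ) == [-3, -3, 0]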
| 85 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    '''simple docstring'''
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''' , '''.''' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name :str = WEIGHTS_NAME ):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info , sep='''/''' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''' ) ) , raw_weights )
        key = '''/'''.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '''.bin''' , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
a__ : Tuple = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check( ):
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
    tokenizer = TaTokenizer.from_pretrained('''t5-small''' )
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
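# Illustration of the shard-splitting arithmetic used in `shard_on_the_fly`
# (my sketch, not part of the conversion script): a new shard starts whenever
# adding the next tensor would push the running byte count past the parsed
# `max_shard_size`.
def _shard_size_demo():
    max_bytes = convert_file_size_to_int('''10GB''' )
    tensor = torch.zeros(1024 , 1024 , dtype=torch.bfloat16 )
    weight_size = tensor.numel() * dtype_byte_size(tensor.dtype )  # 2 MiB for bf16
    print(weight_size , max_bytes , weight_size > max_bytes )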
| 188 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location="cpu")
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    '''simple docstring'''
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict , config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
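# Toy demonstration of the prefix renaming performed in `get_new_dict` above
# (my example; the dummy key is hypothetical):
def _rename_demo():
    toy_key = 'bert.bert.encoder.layer.0.output.dense.weight'
    new_key = toy_key
    for old, new in rename_keys_prefix:
        new_key = new_key.replace(old, new)
    print(new_key)  # visual_bert.encoder.layer.0.output.dense.weight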
| 701 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len : int=32 , max_steps : int=10 , size_objective_set : int=100 , min_len : int=1_026 , trim : bool=True , data_file : str="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file : str="igf_context_pairs.jbl" , ):
    '''simple docstring'''
    set_seed(3)
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1_026 , trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model , objective_set , context_len).item()
    print("perplexity on objective set:" , orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs : int=15 , secondary_learner_batch_size : int=128 , eval_freq : int=100 , igf_model_path : str="igf_model.pt" , ):
    '''simple docstring'''
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len : int=32 , max_steps : int=1_000 , batch_size : int=16 , threshold : float=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval : int=10 , finetuned_model_name : str="gpt2_finetuned.pt" , ):
    '''simple docstring'''
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device)
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step" , global_step , ":" , real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step" , global_step , ":" , real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain data files for WikiText." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--data_file" , type=str , default=None , help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) , )
    parser.add_argument(
        "--igf_data_file" , type=str , default=None , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the final fine-tuned model is stored." , )
    parser.add_argument(
        "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
    parser.add_argument("--seed" , type=int , default=None , help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len" , default=32 , type=int , help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) , )
    parser.add_argument(
        "--size_objective_set" , default=100 , type=int , help="number of articles that are long enough to be used as our objective set" , )
    parser.add_argument(
        "--eval_freq" , default=100 , type=int , help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps" , default=1_000 , type=int , help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size" , default=128 , type=int , help="batch size of training data for secondary learner" , )
    parser.add_argument(
        "--batch_size" , default=16 , type=int , help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval" , default=10 , type=int , help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ) , )
    parser.add_argument(
        "--number" , default=100 , type=int , help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len" , default=1_026 , type=int , help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs" , default=15 , type=int , help="number of epochs to train secondary learner")
    parser.add_argument("--trim" , default=True , type=bool , help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold" , default=1.0 , type=float , help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) , )
    parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=str , help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model" , default=recopy_gpta , type=str , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1_026 , trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
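# `compute_perplexity` comes from the local igf/igf.py module, which is not
# shown here. A hedged sketch of what it plausibly does (the function name,
# slicing, and batching below are assumptions): average the LM loss over
# fixed-length contexts and exponentiate.
import math

def compute_perplexity_sketch(model , dataset , context_len ):
    model.eval()
    total_loss , batches = 0.0 , 0
    with torch.no_grad():
        for example in dataset:
            context = example[0, 0, :context_len].unsqueeze(0)  # (1, context_len)
            total_loss += model(context , labels=context)[0].item()
            batches += 1
    return math.exp(total_loss / max(batches , 1))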
| 94 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig( PretrainedConfig ):
    model_type = "vit"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
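# Quick check of the shapes implied by the defaults above (my example): a
# 224x224 image with 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens,
# plus one [CLS] token, at hidden size 768.
_image_size , _patch_size = 2_24 , 16
assert (_image_size // _patch_size) ** 2 + 1 == 197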
| 76 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def UpperCamelCase__ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_UpperCamelCase =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
_UpperCamelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCamelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCamelCase =CLIPTextModel(UpperCamelCase__ )
_UpperCamelCase =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCamelCase ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=0 ) -> Tuple:
if str(UpperCamelCase__ ).startswith('''mps''' ):
_UpperCamelCase =torch.manual_seed(UpperCamelCase__ )
else:
_UpperCamelCase =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_UpperCamelCase ={
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self : Union[str, Any] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : str ) -> int:
_UpperCamelCase =StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_UpperCamelCase =sag_pipe.to(UpperCamelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase ='''.'''
_UpperCamelCase =torch.manual_seed(0 )
_UpperCamelCase =sag_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
_UpperCamelCase =output.images
_UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase =np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCamelCase__ ( self : int ) -> Tuple:
_UpperCamelCase =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_UpperCamelCase =sag_pipe.to(UpperCamelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase ='''.'''
_UpperCamelCase =torch.manual_seed(0 )
_UpperCamelCase =sag_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
_UpperCamelCase =output.images
_UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase =np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCamelCase__ ( self : str ) -> Tuple:
_UpperCamelCase =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_UpperCamelCase =sag_pipe.to(UpperCamelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase ='''.'''
_UpperCamelCase =torch.manual_seed(0 )
_UpperCamelCase =sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
_UpperCamelCase =output.images
assert image.shape == (1, 512, 768, 3)
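        # Standalone illustration of the slice check used throughout the tests
        # above (my sketch): a 3x3 corner of the last channel is flattened and
        # compared against hard-coded reference values with a 5e-2 tolerance.
        demo = np.zeros((1, 512, 512, 3) , dtype=np.float32 )
        demo_slice = demo[0, -3:, -3:, -1]  # last 3x3 patch, last channel
        assert np.abs(demo_slice.flatten() - np.zeros(9 ) ).max() < 5E-2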
| 404 | 0 |
import argparse
import json
from tqdm import tqdm
def main ():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--src_path""" , type=str , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
    parser.add_argument(
        """--evaluation_set""" , type=str , help="""where to store parsed evaluation_set file""" , )
    parser.add_argument(
        """--gold_data_path""" , type=str , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()
    with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
        args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            contexts = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(contexts ) + """\n""" )
if __name__ == "__main__":
main()
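# In-memory illustration of the parsing above (my example; the record shape
# mirrors the biencoder JSON):
def _demo_record():
    record = {"""question""": """who wrote hamlet""",
              """positive_ctxs""": [{"""title""": """Hamlet"""}, {"""title""": """William Shakespeare"""}]}
    titles = [context["""title"""] for context in record["""positive_ctxs"""]]
    print(record["""question"""])     # one evaluation_set line
    print("""\t""".join(titles))      # one tab-joined gold_data_path line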
| 706 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """imagegpt"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )
    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ):
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
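# Illustration of the `attribute_map` indirection used by the config above
# (my sketch, not the transformers implementation): reading `hidden_size`
# falls through to the stored `n_embd` value.
class _MappedConfigDemo:
    attribute_map = {"""hidden_size""": """n_embd"""}
    def __init__(self , n_embd = 512 ):
        self.n_embd = n_embd
    def __getattr__(self , name ):
        if name in self.attribute_map:
            return getattr(self , self.attribute_map[name] )
        raise AttributeError(name )
assert _MappedConfigDemo().hidden_size == 512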
| 209 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
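# Toy version of the lazy-import idea behind `_LazyModule` above (my sketch,
# not the real implementation): the heavy import only happens the first time a
# symbol is touched, and the result is cached on the module object.
import importlib
import types

class _LazyModuleDemo(types.ModuleType):
    def __init__(self, name, symbol_to_module):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module
    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module(self._symbol_to_module[symbol])
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ is not hit again
        return value

_lazy_demo = _LazyModuleDemo("demo", {"sqrt": "math"})
assert _lazy_demo.sqrt(9.0) == 3.0  # `math` is imported only at this call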
| 72 |
"""simple docstring"""
def fibonacci ( n: int ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n: int ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n: int = 1_0_0_0 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
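# Spot check (my addition): fibonacci(12) = 144 is the first three-digit
# Fibonacci number, so fibonacci_digits_index(3) should be 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12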
| 346 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( number_of_steps: int ) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f"""number_of_steps needs to be a positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
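# The recurrence above is the Fibonacci relation ways(n) = ways(n-1) + ways(n-2);
# e.g. 4 steps can be climbed 5 ways (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2) — my check:
assert __UpperCAmelCase(4) == 5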
| 703 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def __UpperCAmelCase ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
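# Parity check for the Liouville-style sign above (my example; assumes
# prime_factors counts multiplicity): 12 = 2*2*3 has three prime factors,
# so the sign is -1, while 10 = 2*5 gives +1.
assert __UpperCAmelCase(12) == -1
assert __UpperCAmelCase(10) == 1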
| 270 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def _snake_case ( list_of_ints : list[int] ):
    """simple docstring"""
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets : list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
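# One counting pass per decimal digit (my check; with RADIX = 10, the largest
# element 802 needs three passes):
assert _snake_case([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]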
| 88 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital (num : int ) -> bool:
    """simple docstring"""
    digits : str = str(num )
    return len(digits ) == 9 and set(digits ) == set("""123456789""" )
def solution ():
    """simple docstring"""
    for base_num in range(99_99 , 49_99 , -1 ):
        candidate : int = 10_00_02 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate : int = 1_00_20_03 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
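# Why the two magic multipliers above work (my note): for a 4-digit base b the
# concatenated product b|2b equals b * 10**5 + 2b = 100002 * b, and for a
# 3-digit base, b|2b|3b equals 1002003 * b. The known pandigital example:
assert is_9_pandigital(10_00_02 * 9327)  # 9327 | 18654 -> 932718654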
| 609 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_jukebox"""] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix :
    def __init__( self , row , column , default_value = 0 ) -> None:
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ) -> str:
        s = f'Matrix consists of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'%{max_element_length}s'
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self ) -> str:
return str(self )
    def validate_indicies( self , loc ) -> bool:
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc ) -> Any:
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc , value ) -> None:
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another ) -> Matrix:
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ) -> Matrix:
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another ) -> Matrix:
        return self + (-another)
    def __mul__( self , another ) -> Matrix:
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another )})'
            raise TypeError(msg )
    def transpose( self ) -> Matrix:
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u , v ) -> Any:
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1( ) -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}' )
        print(f'v is {v}' )
        print(f'uv^T is {u * v.transpose()}' )
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )
    def test2( ) -> None:
        import doctest
        doctest.testmod()
    test1()
    test2()
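    # Numeric cross-check of the Sherman-Morrison identity exercised above
    # (my addition): with A = I stored as its own inverse, u = (1, 2, -3)^T
    # and v = (4, -2, 5)^T, the denominator 1 + v^T u = 1 + (4 - 4 - 15) = -14.
    def test3( ) -> None:
        m = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            m[i, i] = 1
        uu = Matrix(3 , 1 , 0 )
        uu[0, 0], uu[1, 0], uu[2, 0] = 1, 2, -3
        vv = Matrix(3 , 1 , 0 )
        vv[0, 0], vv[1, 0], vv[2, 0] = 4, -2, 5
        assert (vv.transpose() * m * uu)[0, 0] + 1 == -14
    test3()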
| 393 | 0 |
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
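# Usage sketch for `find_executable_batch_size` re-exported above (my example,
# not part of the package __init__): the decorator injects a batch size and
# halves it on CUDA OOM until the wrapped function completes.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     train()  # called without arguments; the decorator supplies batch_size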
| 303 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('T')
def get_parent_position ( position: int ) -> int:
    '''simple docstring'''
    return (position - 1) // 2
def get_child_left_position ( position: int ) -> int:
    '''simple docstring'''
    return (2 * position) + 1
def get_child_right_position ( position: int ) -> int:
    '''simple docstring'''
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
    '''simple docstring'''
    def __init__(self ) -> None:
        self.heap = []
        self.position_map = {}
        self.elements = 0
    def __len__(self ) -> int:
        return self.elements
    def __repr__(self ) -> str:
        return str(self.heap )
    def is_empty (self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push (self , elem , weight ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min (self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key (self , elem , weight ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up (self , elem ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down (self , elem ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes (self , node1_pos , node2_pos ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class A_ ( Generic[T] ):
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = {}
__UpperCAmelCase = 0
def __repr__(self ) -> str:
return str(self.connections )
def __len__(self ) -> int:
return self.nodes
def lowerCAmelCase_ (self , lowercase__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase = {}
self.nodes += 1
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(lowercase__ )
self.add_node(lowercase__ )
__UpperCAmelCase = weight
__UpperCAmelCase = weight
def __a ( SCREAMING_SNAKE_CASE , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
__UpperCAmelCase = {node: maxsize for node in graph.connections}
__UpperCAmelCase = {node: None for node in graph.connections}
__UpperCAmelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase = priority_queue.extract_min()
__UpperCAmelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
__UpperCAmelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
__UpperCAmelCase = node
return dist, parent
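

# Minimal usage sketch (not part of the original file; the graph below is
# illustrative). With this heap implementation, node "a" is extracted first,
# giving dist == {'a': 0, 'b': 3, 'c': 13} and parent == {'a': None, 'b': 'a', 'c': 'b'}.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(graph)
    print(dist)
    print(parent)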
| 303 | 1 |
from typing import Any, Dict, Optional, Union

import torch

from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs one prompt through Stable Diffusion v1.1-v1.4 so the checkpoints can be compared side by side."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        # the first three checkpoints are loaded as-is; the fourth is built from the passed components
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
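

# Usage sketch (not part of the original file; the `custom_pipeline` identifier is
# an assumption based on how diffusers community pipelines are usually loaded):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   out = pipe(prompt="an astronaut riding a horse")
#   # out.images holds one image per checkpoint, v1.1 through v1.4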
| 414 |
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and remap the answer token indices onto the cleaned context."""
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # shift the answer indices left for every removed HTML token before them
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice_ = input_ids[i:end_index]
            inputs.append(q_indices + slice_)
            category.append(answer["category"][0])
            if slice_[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token: account for the last answer word splitting into sub-tokens
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice_ = input_ids[i:end_index]
        inputs.append(q_indices + slice_)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # answer lies completely inside this stride; shift to stride-local indices
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice_[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
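

# Shape of each JSON line written by save_to_disk (values are illustrative, not
# from a real run); "category" is the integer from CATEGORY_MAPPING:
#   {"input_ids": [65, 289, ...], "start_token": 187, "end_token": 192, "category": 1}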
| 414 | 1 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) model built from a pretrained config.

    Extra kwargs are forwarded to AutoConfig to override config fields.
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
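

# Illustrative invocation (hypothetical script name, path, and override): fire maps
# the two positional arguments to config_name/save_dir and --flags into
# **config_kwargs, which AutoConfig uses to override config fields.
#   python save_randomly_initialized_model.py t5-small /tmp/rand-t5 --d_model=64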
| 204 |
import unittest

from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FalconForCausalLM,
        FalconForQuestionAnswering,
        FalconForSequenceClassification,
        FalconForTokenClassification,
        FalconModel,
    )


class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)

        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
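

# Illustrative invocation (not part of the original file): from a transformers
# checkout, the fast tests run via
#   python -m pytest tests/models/falcon/test_modeling_falcon.py
# and the @slow generation tests additionally require RUN_SLOW=1 in the environment.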
| 295 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra 5-way classification head on top for
    predicting the answer category (null/short/long/yes/no).
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot cross-entropy over the last axis
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # pad every sequence to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # flattened keys are tuples of layer names; exclude biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
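

# Minimal wiring sketch (not part of the original file; dataset loading and wandb
# initialization live in the accompanying training script, so the names here are
# assumptions):
#
#   model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
#   tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                     num_train_steps=num_train_steps, weight_decay=args.weight_decay)
#   trainer = Trainer(args=args, data_collator=collator, train_step_fn=train_step,
#                     val_step_fn=val_step, model_save_fn=model.save_pretrained,
#                     logger=wandb, scheduler_fn=lr)
#   state = trainer.create_state(model, tx, num_train_steps)
#   trainer.train(state, tr_dataset, val_dataset)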
| 707 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
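# A minimal programmatic use of the converter above (paths and the push flag are
# illustrative, not part of the original script):
#
#     from pathlib import Path
#     convert_weights_and_push(Path("./converted"), model_name="resnet50", push_to_hub=False)
#
# This converts only the timm `resnet50` checkpoint and writes it under ./converted
# without touching the Hub.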
| 638 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
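# The pattern above defers the heavy torch/flax imports until an attribute is first
# accessed. A sketch of what _LazyModule enables (module path is illustrative):
#
#     import transformers.models.speech_encoder_decoder as sed
#     cfg_cls = sed.SpeechEncoderDecoderConfig  # the submodule is only imported here
#
# Missing backends simply leave their entries out of _import_structure, so accessing
# e.g. FlaxSpeechEncoderDecoderModel without Flax installed raises a clear error.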
| 588 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        # Saving while still converted to BetterTransformer must fail;
        # reversing the conversion first must succeed.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 588 | 1 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of `dice_number` dice achieve it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    """
    Project Euler 205: Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice.
    P(Peter wins) = sum over Peter's totals t of
        freq_peter(t) * (number of Colin outcomes strictly below t),
    divided by the number of joint outcomes, 4**9 * 6**6.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
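# Quick sanity check for the counting helper (an illustrative example, not part of
# the original solution): with two 2-sided dice the totals 2, 3, 4 occur 1, 2, 1
# times respectively.
#
#     assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]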
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
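# Illustrative call (values, weights, and capacity chosen for the example): three
# items with value/weight ratios 10, 8, 6 and capacity 5 take items 1 and 2 fully
# (weights 1 + 2) plus 2/3 of item 3, i.e. 10 + 16 + 12 = 38.
#
#     assert frac_knapsack([10, 16, 18], [1, 2, 3], 5, 3) == 38.0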
| 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 0 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
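# Sketch of how a pin table like this is typically consumed; `require_version` is a
# real transformers helper, but treat the exact wiring here as an assumption:
#
#     from transformers.utils.versions import require_version
#     require_version(deps["tokenizers"])  # raises if the installed version violates the pin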
| 720 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
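# Note on the queue: list.pop(0) is O(n) per dequeue. A drop-in alternative for
# larger graphs (a sketch, not part of the original) is collections.deque:
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     vertex = queue.popleft()  # O(1) dequeue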
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 387 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Stand-in so references below don't fail when vision isn't installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def load_image(_):
    """Fallback used when vision is unavailable; the tests that need it are skipped."""
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
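# Minimal usage sketch of the pipeline under test (model choice is illustrative):
#
#     from transformers import pipeline
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image="invoice.png", question="What is the invoice number?", top_k=1)
#     # -> [{"score": ..., "answer": ..., "start": ..., "end": ...}]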
| 493 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for a given year, following Gauss' method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 455 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding SDE scheduler with predictor (`step_pred`) and corrector (`step_correct`) steps."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output

        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
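# A minimal predictor-corrector sampling sketch using this scheduler; the model,
# shapes, and loop wiring are illustrative (based on how SDE-VE pipelines typically
# drive it), not part of this file:
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     sample = torch.randn(batch, channels, h, w) * scheduler.init_noise_sigma
#     for i, t in enumerate(scheduler.timesteps):
#         sigma_t = scheduler.sigmas[i] * torch.ones(batch)
#         for _ in range(scheduler.config.correct_steps):
#             score = model(sample, sigma_t).sample
#             sample = scheduler.step_correct(score, sample).prev_sample
#         score = model(sample, sigma_t).sample
#         sample = scheduler.step_pred(score, t, sample).prev_sample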
| 455 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline: predicts the words that will follow a given text prompt."""
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # TransfoXL needs an extra space before punctuation symbols.
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
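# Typical use of this pipeline (prompt and generation settings are illustrative):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#     # -> [{"generated_text": "..."}]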
| 536 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # The class name follows diffusers' dummy-object convention for the
    # transformers+torch+note_seq backend group.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 700 |
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
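# Worked rolling-hash update (illustrative numbers, tiny base for readability):
# with base 10, hash("23") = 2*10 + 3 = 23; sliding the window from "23" to "34"
# in "234" gives
#     (23 - 2*10) * 10 + 4 = 34
# which is exactly the update text_hash undergoes above, taken modulo `modulus`
# and with `alphabet_size` as the base.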
| 386 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Two-ended linear search: checks both ends of the window, then shrinks it."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
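# Illustrative calls (list and keys chosen for the example):
#
#     assert search([1, 2, 3, 4, 5], 1) == 0   # found at the left end
#     assert search([1, 2, 3, 4, 5], 5) == 4   # found at the right end
#     assert search([1, 2, 3, 4, 5], 6) == -1  # absent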
| 305 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """Two Transformer2DModel blocks whose residuals are mixed, for dual text conditions."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
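# Minimal usage sketch (dimensions and arguments are illustrative, not from this file):
#
#     import torch
#     model = DualTransformer2DModel(in_channels=320, cross_attention_dim=768)
#     hidden = torch.randn(2, 320, 64, 64)
#     conditions = torch.randn(2, 77 + 257, 768)  # matches condition_lengths [77, 257]
#     out = model(hidden, conditions).sample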
| 305 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
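# How this mixin is meant to be used (the concrete tool here is illustrative):
#
#     from transformers import load_tool
#
#     class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
#         def setUp(self):
#             self.tool = load_tool("translation")  # any Tool instance works
#
# Each test_* method above then runs against self.tool.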
| 714 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
    def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
    def test_batch_sampler_with_varying_batch_sizes(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
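
    # Minimal sketch (added; the expected output is inferred from the wrap-around
    # pattern exercised by the tests above, so treat it as illustrative):
    #
    #   sampler = BatchSampler(range(6), batch_size=2, drop_last=False)
    #   shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    #   [list(s) for s in shards]
    #   # -> [[[0, 1], [4, 5]], [[2, 3], [0, 1]]]   (shard 1 wraps to stay even)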
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        # If drop_last is False, the reference wraps around to cover any completed batches.
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
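
    # Usage note (added): skip_first_batches is the building block Accelerate uses to
    # resume training mid-epoch. A hedged sketch of the resume loop it enables, where
    # `train_dataloader` and `resume_step` are hypothetical names:
    #
    #   skipped = skip_first_batches(train_dataloader, num_batches=resume_step)
    #   for batch in skipped:  # iteration starts at batch `resume_step`
    #       ...                # training step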
| 141 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
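
# Hedged usage sketch (added; the checkpoint name is taken from the maps above):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("Hello world", "Second segment")
#   # enc["input_ids"] is [CLS] tokens_0 [SEP] tokens_1 [SEP], and enc["token_type_ids"]
#   # marks the second segment with 1s, matching create_token_type_ids_from_sequences.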
| 27 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
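
# Worked example (added for clarity): resizing a 480x640 image to a 384x384 target
# with keep_aspect_ratio=True and multiple=32 fits the height (scale 384/480 = 0.8),
# proposing 384x512; both values are already multiples of 32, so they are kept.
#
#   image = np.zeros((3, 480, 640))
#   get_resize_output_image_size(image, 384, keep_aspect_ratio=True, multiple=32)
#   # -> (384, 512)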
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : bool = False , UpperCAmelCase : int = 1 , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
lowerCamelCase__ : Tuple = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCamelCase__ : List[Any] = get_resize_output_image_size(
UpperCAmelCase , output_size=(size['height'], size['width']) , keep_aspect_ratio=UpperCAmelCase , multiple=UpperCAmelCase , )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ) -> str:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Optional[Any] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : int = None , UpperCAmelCase : bool = None , UpperCAmelCase : int = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Union[str, Any] , ) -> PIL.Image.Image:
lowerCamelCase__ : int = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else self.size
lowerCamelCase__ : List[str] = get_size_dict(UpperCAmelCase )
lowerCamelCase__ : int = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCamelCase__ : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCamelCase__ : List[str] = resample if resample is not None else self.resample
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : int = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Any = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Dict = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ : Dict = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : Tuple = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ : int = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowerCamelCase__ : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase__ : Any = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Tuple] = None ) -> Dict:
lowerCamelCase__ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(UpperCAmelCase ):
lowerCamelCase__ : List[str] = target_sizes.numpy()
lowerCamelCase__ : Tuple = []
for idx in range(len(UpperCAmelCase ) ):
lowerCamelCase__ : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase )
lowerCamelCase__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase )
else:
lowerCamelCase__ : Dict = logits.argmax(dim=1 )
lowerCamelCase__ : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 295 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
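
# Minimal usage sketch (added):
#
#   configuration = ViTMSNConfig()        # sayakpaul/vit-msn-base style defaults
#   configuration.hidden_size             # 768
#   small = ViTMSNConfig(image_size=96)   # any field can be overridden by keyword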
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
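
# Note on the pattern above (added): replacing the module in sys.modules with a
# _LazyModule means each submodule is imported only when one of its attributes is
# first accessed, so importing CLIPSegConfig never pays for the torch-dependent
# modeling file, while accessing CLIPSegModel imports it on demand.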
| 381 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
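
# Worked example (added): trim_batch drops columns that are all-pad across the
# batch, shrinking tensors before they reach the model.
#
#   ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])  # pad_token_id = 0
#   trim_batch(ids, pad_token_id=0)
#   # tensor([[5, 6], [7, 0]])  -- columns 2 and 3 were all-pad; column 1 is kept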
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
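
# Worked example (added): token-level F1 after normalization.
#
#   f1_score("The cat sat", "a cat sat down")
#   # normalization drops the articles, leaving ["cat", "sat"] vs ["cat", "sat", "down"]
#   # num_same = 2, precision = 2/2, recall = 2/3, F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8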
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 559 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 559 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 709 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
A = logging.get_logger(__name__)
def _lowerCamelCase( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, Iterable[int]] , lowerCAmelCase__ : bool , lowerCAmelCase__ : int ):
'''simple docstring'''
def constraint_to_multiple_of(lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : Any=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE_ : Dict = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE_ : str = (output_size, output_size) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else output_size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = get_image_size(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE_ : Optional[int] = output_height / input_height
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE_ : str = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE_ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE_ : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = constraint_to_multiple_of(scale_width * input_width , multiple=lowerCAmelCase__ )
return (new_height, new_width)
class __a ( __A ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ["""pixel_values"""]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = False , UpperCamelCase__ = 1 , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = size if size is not None else {'height': 384, 'width': 384}
SCREAMING_SNAKE_CASE_ : str = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = do_resize
SCREAMING_SNAKE_CASE_ : str = size
SCREAMING_SNAKE_CASE_ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE_ : Tuple = ensure_multiple_of
SCREAMING_SNAKE_CASE_ : Optional[int] = resample
SCREAMING_SNAKE_CASE_ : int = do_rescale
SCREAMING_SNAKE_CASE_ : int = rescale_factor
SCREAMING_SNAKE_CASE_ : str = do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = 1 , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['height'], size['width']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : str = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE_ : str = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE_ : Union[str, Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Tuple = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : str = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Dict = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : int = target_sizes.numpy()
SCREAMING_SNAKE_CASE_ : Tuple = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_ : Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 97 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
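
    # Worked example (added): with size {"shortest_edge": 18, "longest_edge": 1333},
    # a 30x40 (h x w) input has w > h, so the height becomes 18 and the width scales
    # proportionally to int(18 * 40 / 30) = 24.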
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
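
    # The two @slow tests below run the full COCO annotation path end to end and compare the
    # encoded pixel values and targets against reference values from the original preprocessing.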
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 3 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=4_0_0,
        max_seq_length=2_0_0_0,
        spectrogram_length=2_0_4_8,
        feature_size=1_2_8,
        num_audio_channels=1,
        hop_length=5_1_2,
        chunk_length=3_0,
        sampling_rate=4_4_1_0_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_0_0, 1_4_0_0, 2_0_0)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=4_4_1_0_0, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
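
    # _load_datasamples below pulls a few clips from the dummy LibriSpeech dataset on the Hub;
    # indexing the sorted dataset decodes the audio column into float arrays.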
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 1_9_2, 1_2_8))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1E-4))
| 481 | 0 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Function to encrypt text using pseudo-random numbers"""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Function to decrypt text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
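# Round-trip property: decrypt(*encrypt(text)) == text, since ((i + k) * k - k**2) / k == i.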
| 718 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Map every reachable vertex to its parent in the breadth first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to `target_vertex` as "v1->v2->...->vn"."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 203 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Pipeline for image generation of ImageNet class-conditional samples with a DiT (Transformer) backbone.
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings (e.g. ImageNet class names) to their class ids."""
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images for the given ImageNet class ids."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
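

# Minimal usage sketch (the checkpoint name below is illustrative, not part of this file):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images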
| 483 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): range to sample the target shortest edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
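

# Preprocess below mirrors detectron2-style inference preprocessing: convert inputs to tensors,
# resize the shortest edge, normalize with pixel mean/std, then pad everything to a common size.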
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 483 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=_lowerCamelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=_lowerCamelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=_lowerCamelCase , type=_lowerCamelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=_lowerCamelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=_lowerCamelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=_lowerCamelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=_lowerCamelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=_lowerCamelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=_lowerCamelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=_lowerCamelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=_lowerCamelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=_lowerCamelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=_lowerCamelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=_lowerCamelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=_lowerCamelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=_lowerCamelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=_lowerCamelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=_lowerCamelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_lowerCamelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=_lowerCamelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_lowerCamelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=_lowerCamelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=_lowerCamelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=_lowerCamelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=_lowerCamelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=_lowerCamelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=_lowerCamelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=_lowerCamelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=_lowerCamelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=_lowerCamelCase , default=5_00 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=_lowerCamelCase , default=40_00 , help="""Checkpoint interval.""" )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
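        # Masking probability per token is proportional to count**(-mlm_smoothing), i.e.
        # word2vec-style smoothing; special tokens get probability 0 below so they are never masked.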
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
| 178 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
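

# 100002 * x == int(str(x) + str(2 * x)) when x has 4 digits and 2 * x has 5 digits, and
# 1002003 * x == int(str(x) + str(2 * x) + str(3 * x)) when x, 2 * x and 3 * x each have
# 3 digits, so the loops scan concatenated products of x with (1, 2) and (1, 2, 3) downwards.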
if __name__ == "__main__":
print(f'''{solution() = }''')
| 675 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 437 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
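

# The original checkpoints store each attention block's query/key/value as one fused "qkv"
# tensor; convert_state_dict splits it into the separate query/key/value weights that the
# HF MobileViT model expects, and renames every other key via rename_key.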
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 701 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
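
    # `_setup_tpu` and `_setup_strategy` below are cached_property values: the TF device and
    # distribution strategy are resolved lazily on first access and then reused, so merely
    # constructing the arguments object never touches TensorFlow devices.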
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 254 | 0 |
'''simple docstring'''
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : Dict = set()
# Replace all the whitespace in our sentence
UpperCAmelCase__ : str = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase__ ) == 2_6
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase__ : List[Any] = True
elif char.isupper():
UpperCAmelCase__ : List[Any] = True
return all(lowercase__ )
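

# The fastest variant below counts distinct ASCII letters with a single set comprehension.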
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def snake_case_ ( ):
from timeit import timeit
UpperCAmelCase__ : Union[str, Any] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowercase__ ) )
print(timeit("is_pangram_faster()" , setup=lowercase__ ) )
print(timeit("is_pangram_fastest()" , setup=lowercase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 199 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE = [1, 1_0_8_8, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : int = 2 , **snake_case__ : Optional[int] ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = tf.keras.layers.ConvaD(
filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name="convolution" )
UpperCAmelCase__ : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def UpperCamelCase ( self : str , snake_case__ : tf.Tensor , snake_case__ : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ )
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , snake_case__ : int , snake_case__ : int , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name="pooler" )
UpperCAmelCase__ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def UpperCamelCase ( self : Optional[int] , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.pooler(snake_case__ )
for layer_module in self.attention:
UpperCAmelCase__ : Dict = layer_module(snake_case__ )
UpperCAmelCase__ : int = hidden_state * pooled
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : str ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : Optional[Any] = in_channels != out_channels or stride != 1
UpperCAmelCase__ : Optional[int] = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Tuple = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCAmelCase__ : int = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name="layer.2" ),
]
UpperCAmelCase__ : List[Any] = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : List[str] = layer_module(snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.shortcut(snake_case__ )
hidden_state += residual
UpperCAmelCase__ : Union[str, Any] = self.activation(snake_case__ )
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : List[Any] ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = in_channels != out_channels or stride != 1
UpperCAmelCase__ : Union[str, Any] = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Dict = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
UpperCAmelCase__ : Optional[Any] = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name="layer.3" ),
]
UpperCAmelCase__ : Dict = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : Tuple = layer_module(snake_case__ )
UpperCAmelCase__ : Dict = self.shortcut(snake_case__ )
hidden_state += residual
UpperCAmelCase__ : List[Any] = self.activation(snake_case__ )
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 2 , **snake_case__ : Any ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : Any = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
UpperCAmelCase__ : int = [
# downsampling is done in the first layer with stride of 2
layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name="layers.0" ),
*[layer(snake_case__ , snake_case__ , snake_case__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
'''simple docstring'''
for layer_module in self.layers:
UpperCAmelCase__ : Tuple = layer_module(snake_case__ )
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Dict , snake_case__ : RegNetConfig , **snake_case__ : Any ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F"""stages.{i+1}""" ) )
def UpperCamelCase ( self : Dict , snake_case__ : tf.Tensor , snake_case__ : bool = False , snake_case__ : bool = True ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Optional[Any] = hidden_states + (hidden_state,)
UpperCAmelCase__ : Dict = stage_module(snake_case__ )
if output_hidden_states:
UpperCAmelCase__ : Any = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
@keras_serializable
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
lowercase_ : Dict = RegNetConfig
def __init__( self : int , snake_case__ : str , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : List[Any] = config
UpperCAmelCase__ : Dict = TFRegNetEmbeddings(snake_case__ , name="embedder" )
UpperCAmelCase__ : int = TFRegNetEncoder(snake_case__ , name="encoder" )
UpperCAmelCase__ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name="pooler" )
@unpack_inputs
def UpperCamelCase ( self : Dict , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : str = self.embedder(snake_case__ , training=snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.encoder(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
UpperCAmelCase__ : Optional[int] = encoder_outputs[0]
UpperCAmelCase__ : Optional[Any] = self.pooler(snake_case__ )
        # Change the output format to NCHW to keep uniformity across the modules
UpperCAmelCase__ : List[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
UpperCAmelCase__ : Any = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCAmelCase__ : Optional[int] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase_ ( A ):
'''simple docstring'''
lowercase_ : List[Any] = RegNetConfig
lowercase_ : Optional[int] = "regnet"
lowercase_ : Optional[Any] = "pixel_values"
@property
def UpperCamelCase ( self : Any ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
SCREAMING_SNAKE_CASE = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , A , )
class UpperCAmelCase_ ( A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case__ : RegNetConfig , *snake_case__ : Any , **snake_case__ : List[str] ):
'''simple docstring'''
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
UpperCAmelCase__ : Any = TFRegNetMainLayer(snake_case__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self : Optional[int] , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : int=False , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : List[str] = self.regnet(
pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A , )
class UpperCAmelCase_ ( A , A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case__ : RegNetConfig , *snake_case__ : Optional[Any] , **snake_case__ : int ):
'''simple docstring'''
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
UpperCAmelCase__ : Optional[Any] = config.num_labels
UpperCAmelCase__ : str = TFRegNetMainLayer(snake_case__ , name="regnet" )
# classification head
UpperCAmelCase__ : Optional[int] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self : List[str] , snake_case__ : tf.Tensor = None , snake_case__ : tf.Tensor = None , snake_case__ : bool = None , snake_case__ : bool = None , snake_case__ : List[Any]=False , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Dict = self.regnet(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
UpperCAmelCase__ : Dict = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : Union[str, Any] = self.classifier[0](snake_case__ )
UpperCAmelCase__ : str = self.classifier[1](snake_case__ )
UpperCAmelCase__ : Any = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ )
if not return_dict:
UpperCAmelCase__ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
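# A minimal usage sketch for the classification model defined above, assuming the
# obfuscated classes correspond to the upstream `TFRegNetForImageClassification`
# API and that the `facebook/regnet-y-040` checkpoint is reachable (both are
# assumptions, so the sketch is kept as a comment rather than executable module code):
#
#     import tensorflow as tf
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")  # `image` is any PIL image
#     logits = model(**inputs).logits
#     predicted_label = model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]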
| 199 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowercase : int = logging.get_logger(__name__)
__lowercase : Optional[int] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''deberta-v2'''
def __init__( self : Union[str, Any] , A_ : Union[str, Any]=128_100 , A_ : List[Any]=1_536 , A_ : List[str]=24 , A_ : Dict=24 , A_ : Union[str, Any]=6_144 , A_ : Optional[Any]="gelu" , A_ : Dict=0.1 , A_ : Optional[int]=0.1 , A_ : List[str]=512 , A_ : Tuple=0 , A_ : List[str]=0.02 , A_ : Tuple=1E-7 , A_ : str=False , A_ : Dict=-1 , A_ : str=0 , A_ : Any=True , A_ : Optional[int]=None , A_ : str=0 , A_ : List[Any]="gelu" , **A_ : List[str] , ) -> List[str]:
super().__init__(**A_ )
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = relative_attention
__snake_case = max_relative_positions
__snake_case = pad_token_id
__snake_case = position_biased_input
# Backwards compatibility
        if type(pos_att_type ) == str:
__snake_case = [x.strip() for x in pos_att_type.lower().split('''|''' )]
__snake_case = pos_att_type
__snake_case = vocab_size
__snake_case = layer_norm_eps
__snake_case = kwargs.get('''pooler_hidden_size''' , A_ )
__snake_case = pooler_dropout
__snake_case = pooler_hidden_act
class _A ( _UpperCAmelCase ):
"""simple docstring"""
@property
def lowercase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def lowercase ( self : Tuple ) -> int:
return 12
def lowercase ( self : Optional[int] , A_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , A_ : int = -1 , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional["TensorType"] = None , A_ : int = 3 , A_ : int = 40 , A_ : int = 40 , A_ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
__snake_case = super().generate_dummy_inputs(preprocessor=A_ , framework=A_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
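# A short sketch of how this configuration is typically consumed. The class names
# below are the upstream `transformers` equivalents of the obfuscated classes
# above and should be treated as an assumption here:
#
#     from transformers import DebertaV2Config, DebertaV2Model
#
#     config = DebertaV2Config(hidden_size=1536, num_hidden_layers=24, num_attention_heads=24)
#     model = DebertaV2Model(config)  # randomly initialised; no pretrained weights loaded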
| 706 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase ( self : Optional[int] , A_ : List[str]=0 ) -> int:
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(A_ ) )
__snake_case = np.random.RandomState(A_ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase ( self : Optional[Any] ) -> List[Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase ( self : Tuple ) -> Optional[int]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> str:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : str ) -> List[str]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : List[str] ) -> Any:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : str ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase ( self : Dict ) -> Union[str, Any]:
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase ( self : str ) -> Optional[int]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
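# The pattern these tests exercise -- swapping the scheduler on a live ONNX
# pipeline -- reduces to the following sketch (`OnnxStableDiffusionImg2ImgPipeline`
# is the upstream spelling of the class imported above; the checkpoint is the same
# tiny test model, and `from_config` rebuilds a scheduler with identical
# hyper-parameters):
#
#     pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#         provider="CPUExecutionProvider",
#     )
#     pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)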
| 93 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCAmelCase ( snake_case_ ):
def __init__( self :Any , lowercase_ :Tuple = None , lowercase_ :int = None , lowercase_ :Union[str, Any] = None , lowercase_ :int = None , lowercase_ :Union[str, Any] = False , lowercase_ :Tuple = False , lowercase_ :Optional[Any] = None , **lowercase_ :Any , )-> Optional[Any]:
A__ = path_or_paths
        A__ = split if split or isinstance(path_or_paths , dict ) else "train"
A__ = features
A__ = cache_dir
A__ = keep_in_memory
A__ = streaming
A__ = num_proc
A__ = kwargs
@abstractmethod
def UpperCAmelCase_ ( self :int )-> List[Any]:
pass
class UpperCAmelCase ( snake_case_ ):
def __init__( self :int , lowercase_ :List[Any] = None , lowercase_ :List[Any] = None , lowercase_ :Tuple = False , lowercase_ :Optional[int] = False , lowercase_ :List[Any] = None , **lowercase_ :str , )-> Any:
A__ = features
A__ = cache_dir
A__ = keep_in_memory
A__ = streaming
A__ = num_proc
A__ = kwargs
@abstractmethod
def UpperCAmelCase_ ( self :Tuple )-> List[str]:
pass
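# In the upstream `datasets` library these two classes are `AbstractDatasetReader`
# and `AbstractDatasetInputStream`; a concrete reader subclasses one of them and
# implements the abstract `read` method, roughly (illustrative sketch, upstream
# names assumed):
#
#     class JsonDatasetReader(AbstractDatasetReader):
#         def read(self):
#             ...  # build and return a Dataset / IterableDataset from self.path_or_paths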
| 440 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u , p ):
    """Compute the falling-factorial term u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp


def main():
    n = int(input("enter the numbers of values: " ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("enter the values of parameters in a list: " )
    x = list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
| 528 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_m2m_100"""] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__snake_case : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
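# Note on the pattern above: `_LazyModule` defers the heavy submodule imports
# until an attribute is first accessed, so (illustrative sketch):
#
#     import transformers.models.m2m_100 as m2m   # cheap: nothing heavy imported yet
#     m2m.M2M100ForConditionalGeneration           # first access triggers the real import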
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Tuple = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["""GLPNFeatureExtractor"""]
__snake_case : Optional[Any] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_glpn"""] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=18 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , )-> Dict:
_A = size if size is not None else {'height': 20, 'width': 20}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = size
_A = do_normalize
_A = do_convert_rgb
_A = [512, 1024, 2048, 4096]
_A = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def UpperCamelCase ( self )-> Dict:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCamelCase ( self )-> Tuple:
_A = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_A = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
__UpperCAmelCase =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase ( self )-> Optional[int]:
_A = PixaStructImageProcessingTester(self )
@property
def UpperCamelCase ( self )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self )-> List[Any]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
def UpperCamelCase ( self )-> Any:
_A = self.image_processor_tester.prepare_dummy_image()
_A = self.image_processing_class(**self.image_processor_dict )
_A = 2048
_A = image_processor(_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def UpperCamelCase ( self )-> int:
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase ( self )-> List[str]:
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCamelCase ):
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
_A = 'Hello'
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase ( self )-> Optional[int]:
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
_A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase ( self )-> Union[str, Any]:
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
__UpperCAmelCase =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase ( self )-> str:
_A = PixaStructImageProcessingTester(self , num_channels=4 )
_A = 3
@property
def UpperCamelCase ( self )-> str:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self )-> Any:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
def UpperCamelCase ( self )-> Optional[Any]:
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
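# The `+ 2` in `expected_hidden_dim` throughout these tests reflects Pix2Struct's
# patch layout: each flattened patch of length patch_h * patch_w * num_channels is
# prefixed with its row index and its column index, adding two positions. With the
# tester defaults this works out to:
#
#     16 * 16 * 3 + 2 == 770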
| 292 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase ( self )-> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self )-> Union[str, Any]:
_A = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_A = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
sd_pipe.set_scheduler('sample_euler' )
_A = 'A painting of a squirrel eating a burger'
_A = torch.manual_seed(0 )
_A = sd_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_A = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self )-> Optional[Any]:
_A = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_A = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
sd_pipe.set_scheduler('sample_euler' )
_A = 'A painting of a squirrel eating a burger'
_A = torch.manual_seed(0 )
_A = sd_pipe([prompt] , generator=_UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_A = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCamelCase ( self )-> Optional[int]:
_A = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_A = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_A = 'A painting of a squirrel eating a burger'
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=_UpperCamelCase , )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_A = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
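# For reference, the k-diffusion hook these tests exercise boils down to the
# following sketch (checkpoint and sampler names are copied from the tests above;
# running it requires a GPU and network access):
#
#     pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-1-base"
#     ).to("cuda")
#     pipe.set_scheduler("sample_dpmpp_2m")  # any k-diffusion sampler name works here
#     image = pipe("A painting of a squirrel eating a burger", num_inference_steps=15).images[0]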
| 292 | 1 |
'''simple docstring'''
def ugly_numbers( n : int ) -> int:
    """Return the n-th ugly number, i.e. a number whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]
    # Independent pointers into `ugly_nums` for the next multiple of 2, 3 and 5.
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
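    # Quick sanity check: the ugly-number sequence starts
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ..., so the 10th entry must be 12.
    assert ugly_numbers(10) == 12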
| 257 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__a = TypeVar('T')
class A__ ( Generic[T] ):
"""simple docstring"""
    dq_store : deque[T]  # Cache store of keys
    key_reference : set[T]  # References of the keys in cache
    _MAX_CAPACITY : int = 10  # Maximum capacity of cache

    def __init__( self , n : int ) -> None:
        """Create an empty store; a falsy n means an effectively unbounded cache."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x : T ) -> None:
        """Record an access to key `x`, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                # The right end of the deque holds the least recently used key.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ) -> None:
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k )

    def __repr__( self ) -> str:
        return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
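# Why the final state is [5, 4, 'A', 3]: the deque is ordered from most to least
# recently used. Referring to 'A' a second time moved it back to the front, and
# when 5 arrived with the cache full, the least recently used key (2) was evicted
# from the back.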
| 257 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class snake_case ( UpperCamelCase_ ):
lowercase_ = 42
lowercase_ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 85 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic( vector : np.ndarray ) -> np.ndarray:
    """Apply tanh element-wise, computed via the identity (2 / (1 + e**(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
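    # Quick equivalence check (sample values chosen here for illustration):
    # (2 / (1 + e**(-2x))) - 1 is algebraically identical to tanh(x).
    sample = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))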
| 196 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Dict ):
with open(snake_case_ ) as metadata_file:
__magic_name__ = json.load(snake_case_ )
__magic_name__ = LukeConfig(use_entity_aware_attention=snake_case_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )["module"]
# Load the entity vocab file
__magic_name__ = load_original_entity_vocab(snake_case_ )
# add an entry for [MASK2]
__magic_name__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__magic_name__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__magic_name__ = AddedToken('''<ent>''' , lstrip=snake_case_ , rstrip=snake_case_ )
__magic_name__ = AddedToken('''<ent2>''' , lstrip=snake_case_ , rstrip=snake_case_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
__magic_name__ = "MLukeTokenizer"
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
with open(os.path.join(snake_case_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ )
# Initialize the embeddings of the special tokens
__magic_name__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
__magic_name__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
__magic_name__ = state_dict["embeddings.word_embeddings.weight"]
__magic_name__ = word_emb[ent_init_index].unsqueeze(0 )
__magic_name__ = word_emb[enta_init_index].unsqueeze(0 )
__magic_name__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__magic_name__ = state_dict[bias_name]
__magic_name__ = decoder_bias[ent_init_index].unsqueeze(0 )
__magic_name__ = decoder_bias[enta_init_index].unsqueeze(0 )
__magic_name__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__magic_name__ = f'encoder.layer.{layer_index}.attention.self.'
__magic_name__ = state_dict[prefix + matrix_name]
__magic_name__ = state_dict[prefix + matrix_name]
__magic_name__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__magic_name__ = state_dict["entity_embeddings.entity_embeddings.weight"]
__magic_name__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__magic_name__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__magic_name__ = state_dict["entity_predictions.bias"]
__magic_name__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__magic_name__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
__magic_name__ = LukeForMaskedLM(config=snake_case_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
__magic_name__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
__magic_name__ = state_dict[key]
else:
__magic_name__ = state_dict[key]
__magic_name__ = model.load_state_dict(snake_case_ , strict=snake_case_ )
if set(snake_case_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(snake_case_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ , task='''entity_classification''' )
__magic_name__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__magic_name__ = (0, 9)
__magic_name__ = tokenizer(snake_case_ , entity_spans=[span] , return_tensors='''pt''' )
__magic_name__ = model(**snake_case_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ = torch.Size((1, 33, 768) )
__magic_name__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ = torch.Size((1, 1, 768) )
__magic_name__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ )
__magic_name__ = "Tokyo is the capital of <mask>."
__magic_name__ = (24, 30)
__magic_name__ = tokenizer(snake_case_ , entity_spans=[span] , return_tensors='''pt''' )
__magic_name__ = model(**snake_case_ )
__magic_name__ = encoding["input_ids"][0].tolist()
__magic_name__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
__magic_name__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case_ )
__magic_name__ = outputs.entity_logits[0][0].argmax().item()
__magic_name__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(snake_case_ ) )
model.save_pretrained(snake_case_ )
def load_original_entity_vocab( snake_case_ : List[str] ):
    __magic_name__ = ["[MASK]", "[PAD]", "[UNK]"]
    __magic_name__ = [json.loads(line ) for line in open(snake_case_ )]
__magic_name__ = {}
for entry in data:
__magic_name__ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__magic_name__ = entity_id
break
__magic_name__ = f'{language}:{entity_name}'
__magic_name__ = entity_id
return new_mapping
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
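# Example invocation of this conversion script (the script name and all paths
# below are placeholders for illustration, not files shipped with the repository):
#
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path ./mluke/pytorch_model.bin \
#         --metadata_path ./mluke/metadata.json \
#         --entity_vocab_path ./mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base-converted \
#         --model_size base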
| 721 |
def print_max_activities( start : list[int] , finish : list[int] ):
    """Greedy activity selection; assumes the activities are sorted by finish time."""
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
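    # With the sample data above the selected activities are 0, 1, 3 and 4, so the
    # function prints "0,1,3,4," (activity 2 starts before activity 1 finishes, and
    # activity 5 starts before activity 4 finishes).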
| 678 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """data2vec-text"""
def __init__( self: Any,A_: Union[str, Any]=3_0522,A_: Optional[Any]=768,A_: Optional[int]=12,A_: int=12,A_: int=3072,A_: Tuple="gelu",A_: List[str]=0.1,A_: str=0.1,A_: str=512,A_: Union[str, Any]=2,A_: Union[str, Any]=0.0_2,A_: Optional[int]=1E-12,A_: Dict=1,A_: str=0,A_: Any=2,A_: Optional[int]="absolute",A_: Tuple=True,A_: int=None,**A_: List[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
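# What the dynamic-axes declaration above amounts to at ONNX export time
# (derived directly from the code; shown for a non-multiple-choice task with
# type_vocab_size > 0):
#
#     OrderedDict([
#         ("input_ids",      {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#         ("token_type_ids", {0: "batch", 1: "sequence"}),
#     ])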
| 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]:
__UpperCamelCase = np.argmax(_lowercase , axis=2 )
__UpperCamelCase, __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(_lowercase )]
__UpperCamelCase = [[] for _ in range(_lowercase )]
for i in range(_lowercase ):
for j in range(_lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
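# Hypothetical invocation (script name and all paths are placeholders; the
# flags come from the argument dataclasses above):
# python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#     --labels ./labels.txt --output_dir ./ner-out --do_train --do_eval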
| 1 | 1 |
def max_product_subarray(numbers: list) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
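# Quick check: for [2, 3, -2, 4] the best subarray is [2, 3], so
# max_product_subarray([2, 3, -2, 4]) returns 6; a zero resets the running
# products, so max_product_subarray([-2, 0, -1]) returns 0.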
| 714 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
__snake_case = KandinskyVaaInpaintPipeline
__snake_case = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
__snake_case = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
__snake_case = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__snake_case = False
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 100
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_unet
lowerCAmelCase_ = self.dummy_movq
lowerCAmelCase_ = DDIMScheduler(
num_train_timesteps=1000, beta_schedule='''linear''', beta_start=0.00_085, beta_end=0.012, clip_sample=UpperCamelCase__, set_alpha_to_one=UpperCamelCase__, steps_offset=1, prediction_type='''epsilon''', thresholding=UpperCamelCase__, )
lowerCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=0 ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
lowerCAmelCase_ = floats_tensor((1, 3, 64, 64), rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCAmelCase_ = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
lowerCAmelCase_ = np.ones((64, 64), dtype=np.floataa )
lowerCAmelCase_ = 0
if str(UpperCamelCase__ ).startswith('''mps''' ):
lowerCAmelCase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowerCAmelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCAmelCase_ = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCAmelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ), return_dict=UpperCamelCase__, )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
lowerCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCAmelCase_ = np.ones((768, 768), dtype=np.floataa )
lowerCAmelCase_ = 0
lowerCAmelCase_ = '''a hat'''
lowerCAmelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
lowerCAmelCase_ = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''', torch_dtype=torch.floataa )
lowerCAmelCase_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ , lowerCAmelCase_ = pipe_prior(
UpperCamelCase__, generator=UpperCamelCase__, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
lowerCAmelCase_ = pipeline(
image=UpperCamelCase__, mask_image=UpperCamelCase__, image_embeds=UpperCamelCase__, negative_image_embeds=UpperCamelCase__, generator=UpperCamelCase__, num_inference_steps=100, height=768, width=768, output_type='''np''', )
lowerCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__, UpperCamelCase__ )
| 325 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Tuple =logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
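# Sketch: the subclass only adds a deprecation warning on construction, e.g.
# extractor = LayoutLMv2FeatureExtractor()  # emits FutureWarning, then
# behaves exactly like the image processor it inherits from.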
| 428 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
lowercase : str = KandinskyVaaControlnetPipeline
lowercase : Any = ['image_embeds', 'negative_image_embeds', 'hint']
lowercase : List[str] = ['image_embeds', 'negative_image_embeds', 'hint']
lowercase : Dict = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase : Dict = False
@property
def a_ ( self ):
return 32
@property
def a_ ( self ):
return 32
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 100
@property
def a_ ( self ):
torch.manual_seed(0 )
UpperCamelCase : Tuple = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCamelCase : List[str] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a_ ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
UpperCamelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def a_ ( self ):
UpperCamelCase : Optional[Any] = self.dummy_unet
UpperCamelCase : int = self.dummy_movq
UpperCamelCase : List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
UpperCamelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
UpperCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a_ ( self ):
UpperCamelCase : Dict = """cpu"""
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Union[str, Any] = output.images
UpperCamelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase : int = image[0, -3:, -3:, -1]
UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
UpperCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
UpperCamelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
UpperCamelCase : Any = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 255.0
UpperCamelCase : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
UpperCamelCase : str = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = """A robot, 4k photo"""
UpperCamelCase : Optional[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : List[Any] = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCamelCase : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
UpperCamelCase : Dict = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , output_type="""np""" , )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 499 | 0 |
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
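# Usage sketch: with probability 0 (no random braking) the model is
# deterministic. For example, simulate(construct_highway(12, 3, 0), 1, 0.0, 2)
# places stationary cars at cells 0, 3, 6 and 9; after one update every car
# accelerates to speed 1 and advances one cell, to positions 1, 4, 7 and 10.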
| 306 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that maps one placeholder token to several learned vector tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
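# Usage sketch (the checkpoint name is a placeholder and requires a download):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=2)
# tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>")
# -> "a photo of <cat-toy>_0 <cat-toy>_1"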
| 306 | 1 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
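# Example: kilometers carry exponent 3 and megametres exponent 6, so the value
# is scaled by 10 ** (3 - 6):
# length_conversion(4, "kilometer", "megametre")  # -> 0.004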
| 87 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
def __init__( self : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int]=7 ,__lowerCamelCase : Optional[int]=3 ,__lowerCamelCase : Any=18 ,__lowerCamelCase : List[Any]=30 ,__lowerCamelCase : Optional[Any]=4_00 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : int=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] ,__lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size if size is not None else {'''height''': 18, '''width''': 20}
a = do_thumbnail
a = do_align_axis
a = do_pad
a = do_normalize
a = image_mean
a = image_std
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE_ = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_std''' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 20} )
a = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
a = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
self.assertEqual(image_processor.size ,{'''height''': 84, '''width''': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
| 387 | 0 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
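# Example run: entering "2,4,5,3,1" prints [1, 2, 3, 4, 5]. Stooge sort is a
# teaching algorithm; its O(n^(log 3 / log 1.5)), roughly O(n^2.71), runtime
# makes it slower than even bubble sort.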
| 716 |
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 29 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
"""simple docstring"""
__snake_case : Optional[str] = field(default=snake_case__ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__snake_case : str = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__snake_case : str = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__snake_case : str = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
__snake_case : str = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
__snake_case : Optional[int] = field(
default=snake_case__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__snake_case : Optional[int] = field(
default=snake_case__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__snake_case : float = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class ModelArguments:
"""simple docstring"""
__snake_case : str = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__snake_case : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Name or path of preprocessor config."""} )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__snake_case : bool = field(
default=snake_case__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__snake_case : Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
    def __post_init__(self):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
                '''instead. Setting `freeze_feature_encoder==True`.''' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
'''simple docstring'''
__lowerCamelCase : int =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCamelCase : List[str] =training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__lowerCamelCase : Dict =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase : List[str] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
__lowerCamelCase : str =DatasetDict()
__lowerCamelCase : Union[str, Any] =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : int =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__lowerCamelCase : Optional[int] =AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__lowerCamelCase : str =raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__lowerCamelCase : Optional[int] =feature_extractor.model_input_names[0]
def train_transforms(SCREAMING_SNAKE_CASE : str ):
__lowerCamelCase : List[Any] =[]
for audio in batch[data_args.audio_column_name]:
__lowerCamelCase : Any =random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =feature_extractor(SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
__lowerCamelCase : str ={model_input_name: inputs.get(SCREAMING_SNAKE_CASE )}
__lowerCamelCase : List[str] =list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(SCREAMING_SNAKE_CASE : Optional[int] ):
__lowerCamelCase : Optional[int] =[audio['''array'''] for audio in batch[data_args.audio_column_name]]
__lowerCamelCase : str =feature_extractor(SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
__lowerCamelCase : Tuple ={model_input_name: inputs.get(SCREAMING_SNAKE_CASE )}
__lowerCamelCase : str =list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowerCamelCase : Union[str, Any] =raw_datasets['''train'''].features[data_args.label_column_name].names
__lowerCamelCase , __lowerCamelCase : int ={}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : List[str] =str(SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] =label
# Load the accuracy metric from the datasets package
__lowerCamelCase : Any =evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE : Union[str, Any] ):
__lowerCamelCase : Tuple =np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE , references=eval_pred.label_ids )
__lowerCamelCase : List[Any] =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : Union[str, Any] =AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__lowerCamelCase : List[Any] =(
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE , output_all_columns=SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__lowerCamelCase : int =(
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE , output_all_columns=SCREAMING_SNAKE_CASE )
# Initialize our trainer
__lowerCamelCase : Tuple =Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowerCamelCase : Any =None
if training_args.resume_from_checkpoint is not None:
__lowerCamelCase : Tuple =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCamelCase : Any =last_checkpoint
__lowerCamelCase : int =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowerCamelCase : Union[str, Any] =trainer.evaluate()
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowerCamelCase : List[str] ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
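# Hypothetical invocation (dataset and model names are placeholders; the flags
# come from the argument dataclasses above):
# python run_audio_classification.py --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks --output_dir ./ac-out \
#     --do_train --do_eval --max_length_seconds 1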
| 179 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_UpperCamelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( BaseImageProcessor ):
"""simple docstring"""
__snake_case : Optional[int] = ["""pixel_values"""]
def __init__( self :Optional[Any] , __lowercase :bool = True , __lowercase :Optional[Dict[str, int]] = None , __lowercase :PILImageResampling = PILImageResampling.BILINEAR , __lowercase :bool = True , __lowercase :Dict[str, int] = None , __lowercase :bool = True , __lowercase :Union[int, float] = 1 / 255 , __lowercase :bool = True , __lowercase :Optional[Union[float, List[float]]] = None , __lowercase :Optional[Union[float, List[float]]] = None , **__lowercase :Tuple , ):
super().__init__(**__lowercase )
__lowerCamelCase : int =size if size is not None else {'''shortest_edge''': 256}
__lowerCamelCase : int =get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCamelCase : Optional[Any] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowerCamelCase : List[str] =get_size_dict(__lowercase , param_name='''crop_size''' )
__lowerCamelCase : List[str] =do_resize
__lowerCamelCase : Optional[Any] =size
__lowerCamelCase : Tuple =resample
__lowerCamelCase : int =do_center_crop
__lowerCamelCase : Optional[Any] =crop_size
__lowerCamelCase : int =do_rescale
__lowerCamelCase : str =rescale_factor
__lowerCamelCase : int =do_normalize
__lowerCamelCase : Union[str, Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase : Tuple =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowercase ( self :Union[str, Any] , __lowercase :np.ndarray , __lowercase :Dict[str, int] , __lowercase :PILImageResampling = PILImageResampling.BICUBIC , __lowercase :Optional[Union[str, ChannelDimension]] = None , **__lowercase :str , ):
__lowerCamelCase : Tuple =get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__lowerCamelCase : List[Any] =get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __lowercase ( self :Union[str, Any] , __lowercase :np.ndarray , __lowercase :Dict[str, int] , __lowercase :Optional[Union[str, ChannelDimension]] = None , **__lowercase :int , ):
__lowerCamelCase : List[Any] =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase )
def __lowercase ( self :str , __lowercase :np.ndarray , __lowercase :float , __lowercase :Optional[Union[str, ChannelDimension]] = None , **__lowercase :str ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def __lowercase ( self :Union[str, Any] , __lowercase :np.ndarray , __lowercase :Union[float, List[float]] , __lowercase :Union[float, List[float]] , __lowercase :Optional[Union[str, ChannelDimension]] = None , **__lowercase :Union[str, Any] , ):
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def __lowercase ( self :List[str] , __lowercase :ImageInput , __lowercase :Optional[bool] = None , __lowercase :Dict[str, int] = None , __lowercase :PILImageResampling = None , __lowercase :bool = None , __lowercase :Dict[str, int] = None , __lowercase :Optional[bool] = None , __lowercase :Optional[float] = None , __lowercase :Optional[bool] = None , __lowercase :Optional[Union[float, List[float]]] = None , __lowercase :Optional[Union[float, List[float]]] = None , __lowercase :Optional[Union[str, TensorType]] = None , __lowercase :Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase :Optional[Any] , ):
__lowerCamelCase : Optional[Any] =do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : Optional[Any] =size if size is not None else self.size
__lowerCamelCase : List[str] =get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCamelCase : List[str] =resample if resample is not None else self.resample
__lowerCamelCase : Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : Any =crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Union[str, Any] =get_size_dict(__lowercase , param_name='''crop_size''' )
__lowerCamelCase : Any =do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : List[Any] =do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : List[Any] =image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : int =image_std if image_std is not None else self.image_std
__lowerCamelCase : int =make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCamelCase : int =[to_numpy_array(__lowercase ) for image in images]
if do_resize:
__lowerCamelCase : Tuple =[self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__lowerCamelCase : Any =[self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__lowerCamelCase : Optional[Any] =[self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__lowerCamelCase : Tuple =[self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__lowerCamelCase : List[str] =[to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCamelCase : Union[str, Any] ={'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
def __lowercase ( self :Dict , __lowercase :Dict , __lowercase :List[Tuple] = None ):
__lowerCamelCase : Dict =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__lowercase ):
__lowerCamelCase : Optional[int] =target_sizes.numpy()
__lowerCamelCase : List[Any] =[]
for idx in range(len(__lowercase ) ):
__lowerCamelCase : Optional[int] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__lowercase )
__lowerCamelCase : Union[str, Any] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowercase )
else:
__lowerCamelCase : Union[str, Any] =logits.argmax(dim=1 )
__lowerCamelCase : Optional[int] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
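
# Minimal usage sketch (added for illustration; the class name above was chosen here
# during reconstruction, and real defaults depend on a checkpoint's saved config):
def _demo_image_processor() -> None:
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = SemanticSegmentationImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop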
| 179 | 1 |
"""simple docstring"""
import os
def __UpperCAmelCase ( _snake_case : str = "input.txt" ):
with open(os.path.join(os.path.dirname(_lowercase ), _lowercase ) ) as input_file:
_lowercase = [
[int(_lowercase ) for element in line.split("," )]
for line in input_file.readlines()
]
_lowercase = len(_lowercase )
_lowercase = len(matrix[0] )
_lowercase = [[-1 for _ in range(_lowercase )] for _ in range(_lowercase )]
for i in range(_lowercase ):
_lowercase = matrix[i][0]
for j in range(1, _lowercase ):
for i in range(_lowercase ):
_lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, _lowercase ):
_lowercase = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
_lowercase = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
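
# Usage sketch (added for illustration). Passing an absolute path works because
# os.path.join discards its first component when the second one is absolute:
def _demo_solution() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("131,673,234\n201,96,342\n")
    print(solution(tmp.name))  # minimal left-to-right path sum of the 2x3 matrix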
| 718 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase : List[Any] = 1.6021E-19 # units = C
def __UpperCAmelCase ( _snake_case : float, _snake_case : float, _snake_case : float, ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
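
# Usage sketch (added for illustration; input values are arbitrary):
def _demo_carrier_concentration() -> None:
    name, value = carrier_concentration(conductivity=1000, electron_conc=1e20, mobility=0)
    print(name, value)  # mobility ≈ 62.4 (units depend on the inputs)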
| 227 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Tuple = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt')
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ : Dict = text_generator('This is a test' , do_sample=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
UpperCamelCase__ : int = text_generator(['This is a test', 'This is a second test'])
self.assertEqual(
UpperCAmelCase_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
UpperCamelCase__ : List[str] = text_generator('This is a test' , do_sample=UpperCAmelCase_ , num_return_sequences=2 , return_tensors=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
{'generated_token_ids': ANY(UpperCAmelCase_)},
{'generated_token_ids': ANY(UpperCAmelCase_)},
] , )
UpperCamelCase__ : List[Any] = text_generator.model.config.eos_token_id
UpperCamelCase__ : Optional[int] = '<pad>'
UpperCamelCase__ : str = text_generator(
['This is a test', 'This is a second test'] , do_sample=UpperCAmelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCAmelCase_ , )
self.assertEqual(
UpperCAmelCase_ , [
[
{'generated_token_ids': ANY(UpperCAmelCase_)},
{'generated_token_ids': ANY(UpperCAmelCase_)},
],
[
{'generated_token_ids': ANY(UpperCAmelCase_)},
{'generated_token_ids': ANY(UpperCAmelCase_)},
],
] , )
@require_tf
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Any = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf')
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ : Union[str, Any] = text_generator('This is a test' , do_sample=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
UpperCamelCase__ : Union[str, Any] = text_generator(['This is a test', 'This is a second test'] , do_sample=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[str] = 'Hello I believe in'
UpperCamelCase__ : List[str] = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Optional[int] = text_generator(UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
UpperCamelCase__ : List[str] = text_generator(UpperCAmelCase_ , stop_sequence=' fe')
self.assertEqual(UpperCAmelCase_ , [{'generated_text': 'Hello I believe in fe'}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
UpperCamelCase__ : Dict = text_generator('This is a test')
self.assertEqual(UpperCAmelCase_ , [{'generated_text': ANY(UpperCAmelCase_)}])
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
UpperCamelCase__ : int = text_generator('This is a test' , return_full_text=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , [{'generated_text': ANY(UpperCAmelCase_)}])
self.assertNotIn('This is a test' , outputs[0]['generated_text'])
UpperCamelCase__ : List[Any] = pipeline(task='text-generation' , model=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , return_full_text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = text_generator('This is a test')
self.assertEqual(UpperCAmelCase_ , [{'generated_text': ANY(UpperCAmelCase_)}])
self.assertNotIn('This is a test' , outputs[0]['generated_text'])
UpperCamelCase__ : str = text_generator('This is a test' , return_full_text=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , [{'generated_text': ANY(UpperCAmelCase_)}])
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
UpperCamelCase__ : List[Any] = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
[{'generated_text': ANY(UpperCAmelCase_)}, {'generated_text': ANY(UpperCAmelCase_)}],
[{'generated_text': ANY(UpperCAmelCase_)}, {'generated_text': ANY(UpperCAmelCase_)}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase__ : Any = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCAmelCase_)
self.assertEqual(
UpperCAmelCase_ , [
[{'generated_text': ANY(UpperCAmelCase_)}, {'generated_text': ANY(UpperCAmelCase_)}],
[{'generated_text': ANY(UpperCAmelCase_)}, {'generated_text': ANY(UpperCAmelCase_)}],
] , )
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = text_generator('test' , return_full_text=UpperCAmelCase_ , return_text=UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : List[str] = text_generator('test' , return_full_text=UpperCAmelCase_ , return_tensors=UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : int = text_generator('test' , return_text=UpperCAmelCase_ , return_tensors=UpperCAmelCase_)
# An empty prompt is slightly special: it requires a BOS token to exist.
# Pegasus is a special case: it always appends EOS, so it works
# even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase__ : Optional[Any] = text_generator('')
self.assertEqual(UpperCAmelCase_ , [{'generated_text': ANY(UpperCAmelCase_)}])
else:
with self.assertRaises((ValueError, AssertionError)):
UpperCamelCase__ : Optional[int] = text_generator('')
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and long generation
# cannot be controlled with max_length alone without fancy
# calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator('This is a test' * 500 , max_new_tokens=20)
UpperCamelCase__ : List[str] = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(UpperCAmelCase_):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : int):
import torch
# Classic `model_kwargs`
UpperCamelCase__ : Tuple = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
UpperCamelCase__ : List[str] = pipe('This is a test')
self.assertEqual(
UpperCAmelCase_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
UpperCamelCase__ : List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
UpperCamelCase__ : List[Any] = pipe('This is a test')
self.assertEqual(
UpperCAmelCase_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCamelCase__ : int = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto')
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
UpperCamelCase__ : Any = pipe('This is a test')
self.assertEqual(
UpperCAmelCase_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def __UpperCamelCase ( self : Dict):
import torch
UpperCamelCase__ : Dict = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa)
pipe('This is a test')
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : Tuple):
import torch
UpperCamelCase__ : Dict = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa)
pipe('This is a test' , do_sample=UpperCAmelCase_ , top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # the beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
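
# Usage sketch (added for illustration; mirrors the pipeline behaviour exercised
# above, with a tiny test checkpoint so it runs quickly):
def _demo_text_generation() -> None:
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    # Passing only max_new_tokens avoids the "Both `max_new_tokens`..." warning.
    print(generator("Hello world", max_new_tokens=5))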
| 596 |
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
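
# Usage sketch (added for illustration), reusing the toy dataset above:
def _demo_deduplication() -> None:
    ds = get_dataset()
    ds_dedup, clusters = deduplicate_dataset(ds)
    print(len(ds), "->", len(ds_dedup), "rows;", len(clusters), "duplicate cluster(s)")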
| 596 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
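
# Usage sketch (added for illustration; candidate labels are free-form text,
# and `audio_array` stands for any 1-D waveform array):
#
#     classifier = pipeline(task="zero-shot-audio-classification",
#                           model="laion/clap-htsat-unfused")
#     classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of rain"])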
| 711 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two argument parsers."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: str = HfArgumentParser(A_ )
lowerCamelCase_: str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A_ , required=A_ )
expected.add_argument("""--bar""" , type=A_ , required=A_ )
expected.add_argument("""--baz""" , type=A_ , required=A_ )
expected.add_argument("""--flag""" , type=A_ , default=A_ , const=A_ , nargs="""?""" )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: List[Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((lowerCamelCase_) , ): int = parser.parse_args_into_dataclasses(A_ , look_for_args_file=A_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_: int = HfArgumentParser(A_ )
lowerCamelCase_: int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=A_ )
expected.add_argument("""--baz""" , default="""toto""" , type=A_ , help="""help message""" )
self.argparsersEqual(A_ , A_ )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_: Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A_ , default=A_ , const=A_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=A_ , default=A_ , const=A_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=A_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=A_ , default=A_ )
lowerCamelCase_: int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A_ )
for dataclass_type in dataclass_types:
lowerCamelCase_: Any = HfArgumentParser(A_ )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: int = parser.parse_args([] )
self.assertEqual(A_ , Namespace(foo=A_ , baz=A_ , opt=A_ ) )
lowerCamelCase_: List[str] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(A_ , Namespace(foo=A_ , baz=A_ , opt=A_ ) )
lowerCamelCase_: List[str] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(A_ , Namespace(foo=A_ , baz=A_ , opt=A_ ) )
lowerCamelCase_: List[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(A_ , Namespace(foo=A_ , baz=A_ , opt=A_ ) )
lowerCamelCase_: int = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(A_ , Namespace(foo=A_ , baz=A_ , opt=A_ ) )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_: int = HfArgumentParser(A_ )
lowerCamelCase_: str = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowerCamelCase_: List[str] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCamelCase_: List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowerCamelCase_: Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCamelCase_: Any = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
lowerCamelCase_: Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
@dataclass
class a__ :
_A = "toto"
lowerCamelCase_: Union[str, Any] = HfArgumentParser(A_ )
lowerCamelCase_: List[Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: List[str] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowerCamelCase_: Any = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowerCamelCase_: Optional[Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_: List[Any] = HfArgumentParser(A_ )
lowerCamelCase_: Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=A_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=A_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=A_ )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: List[str] = parser.parse_args([] )
self.assertEqual(
A_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCamelCase_: Union[str, Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(A_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_: Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=A_ , type=A_ )
expected.add_argument("""--bar""" , default=A_ , type=A_ , help="""help message""" )
expected.add_argument("""--baz""" , default=A_ , type=A_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=A_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=A_ )
lowerCamelCase_: Union[str, Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A_ )
for dataclass_type in dataclass_types:
lowerCamelCase_: Tuple = HfArgumentParser(A_ )
self.argparsersEqual(A_ , A_ )
lowerCamelCase_: Union[str, Any] = parser.parse_args([] )
self.assertEqual(A_ , Namespace(foo=A_ , bar=A_ , baz=A_ , ces=[] , des=[] ) )
lowerCamelCase_: Optional[int] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(A_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_: Optional[int] = HfArgumentParser(A_ )
lowerCamelCase_: Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=A_ , required=A_ )
expected.add_argument("""--required_str""" , type=A_ , required=A_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=A_ , )
self.argparsersEqual(A_ , A_ )
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_: Any = HfArgumentParser(A_ )
lowerCamelCase_: Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A_ , required=A_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=A_ , )
expected.add_argument("""--opt""" , type=A_ , default=A_ )
expected.add_argument("""--baz""" , default="""toto""" , type=A_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A_ )
self.argparsersEqual(A_ , A_ )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_: Tuple = HfArgumentParser(A_ )
lowerCamelCase_: List[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowerCamelCase_: Optional[int] = parser.parse_dict(A_ )[0]
lowerCamelCase_: Optional[Any] = BasicExample(**A_ )
self.assertEqual(A_ , A_ )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: Optional[int] = HfArgumentParser(A_ )
lowerCamelCase_: Tuple = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(A_ , parser.parse_dict , A_ , allow_extra_keys=A_ )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: Union[str, Any] = HfArgumentParser(A_ )
lowerCamelCase_: int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_: Union[str, Any] = os.path.join(A_ , """temp_json""" )
os.mkdir(A_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(A_ , A_ )
lowerCamelCase_: List[str] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
lowerCamelCase_: List[str] = BasicExample(**A_ )
self.assertEqual(A_ , A_ )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_: str = HfArgumentParser(A_ )
lowerCamelCase_: List[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_: Tuple = os.path.join(A_ , """temp_yaml""" )
os.mkdir(A_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(A_ , A_ )
lowerCamelCase_: List[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
lowerCamelCase_: str = BasicExample(**A_ )
self.assertEqual(A_ , A_ )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowerCamelCase_: List[str] = HfArgumentParser(A_ )
self.assertIsNotNone(A_ )
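
# Usage sketch (added for illustration; shows the parser pattern these tests cover):
def _demo_hf_argument_parser() -> None:
    parser = HfArgumentParser(WithDefaultExample)
    (example,) = parser.parse_args_into_dataclasses(["--foo", "12"])
    print(example.foo, example.baz)  # 12 toto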
| 584 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __a :
UpperCamelCase_ : Dict = OPTConfig
UpperCamelCase_ : List[Any] = {}
UpperCamelCase_ : Union[str, Any] = "gelu"
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=20 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : str=16 , )-> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = embed_dim
UpperCamelCase = word_embed_proj_dim
UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : str )-> Tuple:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCAmelCase_ , **self.config_updates , )
UpperCamelCase = prepare_opt_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] )-> List[Any]:
"""simple docstring"""
UpperCamelCase = TFOPTModel(config=UpperCAmelCase_ )
UpperCamelCase = inputs_dict["input_ids"]
UpperCamelCase = input_ids[:1, :]
UpperCamelCase = inputs_dict["attention_mask"][:1, :]
UpperCamelCase = 1
# first forward pass
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase = outputs.to_tuple()
# create hypothetical next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-3 )
@require_tf
class __a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
UpperCamelCase_ : List[str] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : Dict = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : Tuple = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : List[str] = 10
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCamelCase = TFOPTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
    if hasattr(embedding_layer, "weight"):
        return embedding_layer.weight
    else:
        # Build the word embedding weights if they don't exist yet,
        # then retry fetching the attribute once built.
        model.build()
        if hasattr(embedding_layer, "weight"):
            return embedding_layer.weight
        else:
            return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
UpperCamelCase = model_class(config=UpperCAmelCase_ )
UpperCamelCase = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
UpperCamelCase = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(UpperCAmelCase_ )
UpperCamelCase = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
UpperCamelCase = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
UpperCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , UpperCAmelCase_ )
# check that weights remain the same after resizing
UpperCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCamelCase = False
self.assertTrue(UpperCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , UpperCAmelCase_ )
UpperCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCamelCase = False
self.assertTrue(UpperCAmelCase_ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class __a ( unittest.TestCase ):
UpperCamelCase_ : int = 99
def _SCREAMING_SNAKE_CASE ( self : Any )-> int:
"""simple docstring"""
UpperCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
UpperCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
UpperCamelCase = input_ids.shape[0]
UpperCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __a ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int )-> str:
"""simple docstring"""
super().setUp()
UpperCamelCase = "facebook/opt-350m"
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
UpperCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
UpperCamelCase = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
UpperCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
UpperCamelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-4 ) )
UpperCamelCase = tf.function(UpperCAmelCase_ , jit_compile=UpperCAmelCase_ )
UpperCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class __a ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _SCREAMING_SNAKE_CASE ( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = "facebook/opt-125m"
UpperCamelCase = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCamelCase = []
UpperCamelCase = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
for prompt in self.prompts:
UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors="tf" ).input_ids
UpperCamelCase = model.generate(UpperCAmelCase_ , max_length=10 )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any )-> List[Any]:
"""simple docstring"""
UpperCamelCase = "facebook/opt-350m"
UpperCamelCase = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = "left"
# use different length sentences to test batching
UpperCamelCase = [
"Hello, my dog is a little",
"Today, I",
]
UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_ )
UpperCamelCase = inputs["input_ids"]
UpperCamelCase = model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs["attention_mask"] )
UpperCamelCase = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCAmelCase_ )
UpperCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
UpperCamelCase = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCAmelCase_ , max_length=model.config.max_length - num_paddings )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence] )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = "facebook/opt-350m"
UpperCamelCase = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCamelCase = []
UpperCamelCase = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
for prompt in self.prompts:
UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors="tf" ).input_ids
UpperCamelCase = model.generate(UpperCAmelCase_ , max_length=10 )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
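
# Usage sketch (added for illustration; matches the generation pattern tested above
# and uses the names imported under the is_tf_available() guard):
def _demo_opt_generation() -> None:
    tokenizer = GPTaTokenizer.from_pretrained("facebook/opt-125m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    inputs = tokenizer("Today is a beautiful day and", return_tensors="tf")
    output_ids = model.generate(inputs.input_ids, max_length=10)
    print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))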
| 554 |
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
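
# Usage sketch (added for illustration; these helpers are private to `datasets`):
#
#     >>> _distribute_shards(num_shards=10, max_num_jobs=3)
#     [range(0, 4), range(4, 7), range(7, 10)]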
| 489 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on
@slow
def lowercase__ ( self : Optional[int] ):
# fmt: off
lowerCamelCase_ = {"""input_ids""": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def lowercase__ ( self : Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saves with the same files
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
SCREAMING_SNAKE_CASE = '''facebook/mbart-large-50-one-to-many-mmt'''
SCREAMING_SNAKE_CASE = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
SCREAMING_SNAKE_CASE = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
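# The integration tests below hit the real facebook/mbart-large-50-one-to-many-mmt
# checkpoint and cover language-code handling, truncation and seq2seq batching.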
@classmethod
def setUpClass ( cls : Optional[Any] ):
    cls.tokenizer = MBartaaTokenizer.from_pretrained(
        cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
    cls.pad_token_id = 1
    return cls
def lowercase__ ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 2_5_0_0_3_8 )
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
def lowercase__ ( self : Tuple ):
self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )
def lowercase__ ( self : Any ):
lowerCamelCase_ = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __UpperCamelCase )
lowerCamelCase_ = 1_0
lowerCamelCase_ = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
def lowercase__ ( self : Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCamelCase )
lowerCamelCase_ = MBartaaTokenizer.from_pretrained(__UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCamelCase )
@require_torch
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCamelCase_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = self.tokenizer(self.src_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=3 , return_tensors="""pt""" )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=1_0 , return_tensors="""pt""" )
lowerCamelCase_ = targets["""input_ids"""]
lowerCamelCase_ = shift_tokens_right(__UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 700 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
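# True when the module has already been wrapped by torch.compile (torch >= 2.0).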
def __lowerCAmelCase ( UpperCAmelCase__ : Any ) -> bool:
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
        return False
    return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )
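# Unwraps a model from DistributedDataParallel/DataParallel (and DeepSpeedEngine
# when available), optionally removes the mixed-precision forward wrapper, and
# re-attaches any torch.compile wrapper before returning.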
def __lowerCAmelCase ( model , keep_fpaa_wrapper : bool = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , """forward""" )
        original_forward = model.__dict__.pop("""_original_forward""" , None )
        if original_forward is not None:
            while hasattr(forward , """__wrapped__""" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , """_converted_to_transformer_engine""" , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
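# Synchronization barrier: each process blocks here until all processes arrive.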
def __lowerCAmelCase ( ) -> Any:
PartialState().wait_for_everyone()
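# Saves `obj` to `f`: via xm.save on TPU, otherwise via torch.save on the local
# main process only.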
def __lowerCAmelCase ( obj : Union[str, Any] , f : Tuple ) -> None:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
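# Context manager that temporarily sets upper-cased environment variables from
# its keyword arguments and deletes them again on exit.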
@contextmanager
def __lowerCAmelCase ( **kwargs : Any ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
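# Best-effort readable name for an object: __qualname__ or __name__, falling
# back to the object's class and finally to str().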
def __lowerCAmelCase ( obj : Union[str, Any] ) -> str:
    if not hasattr(obj , """__qualname__""" ) and not hasattr(obj , """__name__""" ):
        obj = getattr(obj , """__class__""" , obj )
    if hasattr(obj , """__qualname__""" ):
        return obj.__qualname__
    if hasattr(obj , """__name__""" ):
        return obj.__name__
    return str(obj )
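# Recursively merges `source` into `destination` in place: nested dicts are
# merged key by key, scalar values overwrite.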
def merge_dicts ( source : dict , destination : dict ) -> dict:
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
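# Returns True if a process is already listening on `port` on localhost
# (defaults to 29500, the usual torch.distributed master port).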
def __lowerCAmelCase ( port : int = None ) -> bool:
    if port is None:
        port = 2_9_5_0_0
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
| 103 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
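# ConvNeXt-style preprocessing: below 384px the shortest edge is enlarged by
# 1/crop_pct before a center crop; at 384px and above the image is simply
# resized (warped), then optionally rescaled and normalized.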
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : int , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : str , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 82 |
"""simple docstring"""
def a__ ( number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
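# Minimax over a complete binary game tree: leaves hold the scores, and the
# maximizing and minimizing players alternate by depth.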
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main ( ) -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 709 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
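# The fast tests below run the pipeline end to end with tiny randomly
# initialized UNet/VQModel components on CPU; the @slow class at the bottom
# exercises the real Kandinsky 2.2 controlnet-depth checkpoints on GPU.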
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : List[str] = KandinskyVaaControlnetImgaImgPipeline
snake_case : str = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
snake_case : Tuple = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
snake_case : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case : Tuple = False
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return self.time_input_dim
@property
def snake_case_ (self ):
return self.time_input_dim * 4
@property
def snake_case_ (self ):
return 1_0_0
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : Dict = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_UpperCAmelCase : Dict = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def snake_case_ (self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.dummy_unet
_UpperCAmelCase : str = self.dummy_movq
_UpperCAmelCase : Union[str, Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_UpperCAmelCase : List[str] = DDIMScheduler(**lowerCAmelCase__ )
_UpperCAmelCase : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
_UpperCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
_UpperCAmelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : Tuple = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create hint
_UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("""mps""" ):
_UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case_ (self ):
_UpperCAmelCase : Dict = """cpu"""
_UpperCAmelCase : str = self.get_dummy_components()
_UpperCAmelCase : Tuple = self.pipeline_class(**lowerCAmelCase__ )
_UpperCAmelCase : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : Dict = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ):
_UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
_UpperCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_UpperCAmelCase : str = init_image.resize((5_1_2, 5_1_2) )
_UpperCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_UpperCAmelCase : Any = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 2_5_5.0
_UpperCAmelCase : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_UpperCAmelCase : List[Any] = """A robot, 4k photo"""
_UpperCAmelCase : Optional[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
_UpperCAmelCase : Any = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_UpperCAmelCase : List[str] = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = pipe_prior(
lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.8_5 , generator=lowerCAmelCase__ , negative_prompt="""""" , ).to_tuple()
_UpperCAmelCase : Optional[Any] = pipeline(
image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
_UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 156 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
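# The tester below builds a tiny ConvNeXt V2 configuration plus random pixel
# inputs that are shared by the model, classification-head and backbone tests.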
class a :
"""simple docstring"""
def __init__( self : str , __lowercase : Dict , __lowercase : Optional[Any]=13 , __lowercase : str=32 , __lowercase : Union[str, Any]=3 , __lowercase : int=4 , __lowercase : int=[10, 20, 30, 40] , __lowercase : List[str]=[2, 2, 3, 2] , __lowercase : Dict=True , __lowercase : List[Any]=True , __lowercase : str=37 , __lowercase : Tuple="gelu" , __lowercase : int=10 , __lowercase : Dict=0.02 , __lowercase : Dict=["stage2", "stage3", "stage4"] , __lowercase : Optional[int]=[2, 3, 4] , __lowercase : List[Any]=None , ) -> Dict:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Any = num_stages
__UpperCAmelCase : str = hidden_sizes
__UpperCAmelCase : Optional[int] = depths
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : List[Any] = num_labels
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Dict ) -> str:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : Union[str, Any] , __lowercase : Any ) -> List[Any]:
__UpperCAmelCase : Tuple = ConvNextVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self : Optional[int] , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : List[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = ConvNextVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : List[str] ) -> Tuple:
__UpperCAmelCase : int = ConvNextVaBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowercase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : int = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a : List[Any] = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a : List[str] = False
a : Union[str, Any] = False
a : Optional[Any] = False
a : Optional[Any] = False
a : Tuple = False
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def UpperCAmelCase ( self : int ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : Any ) -> str:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCAmelCase ( self : Tuple ) -> str:
pass
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : int = True
if model_class.__name__ in [
*get_values(__lowercase ),
*get_values(__lowercase ),
]:
continue
__UpperCAmelCase : List[str] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : Dict = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : str = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = True
if (
model_class.__name__
in [*get_values(__lowercase ), *get_values(__lowercase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : Tuple = model_class(__lowercase )
model.to(__lowercase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : int = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : int = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : str ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict ):
__UpperCAmelCase : str = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : Dict ) -> str:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase : Optional[int] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__lowercase )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : List[Any] = preprocessor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : int = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 63 |
'''simple docstring'''
import torch
from transformers import AutoModel
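# Few-shot NER span scorer: encodes query and support sentences with BERT, then
# scores each query token against the support set's entity start/end token
# embeddings (matmul + softmax) to predict span boundaries.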
class lowerCAmelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : List[str]="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = AutoModel.from_pretrained(_UpperCAmelCase , return_dict=_UpperCAmelCase )
UpperCAmelCase__ = torch.nn.CosineSimilarity(3 , 1E-08 )
UpperCAmelCase__ = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **_UpperCAmelCase : List[str] ):
"""simple docstring"""
return self.bert(**_UpperCAmelCase ).last_hidden_state
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : List[str] ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(_UpperCAmelCase , _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = W_supports["""sizes"""].tolist()
UpperCAmelCase__ = W_supports["""start_token_id"""].item()
UpperCAmelCase__ = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCAmelCase__ = self.BERT(**_UpperCAmelCase )
UpperCAmelCase__ = self.BERT(**_UpperCAmelCase )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = W_supports["""input_ids"""] == start_token_id
UpperCAmelCase__ = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(_UpperCAmelCase ):
if i == 0:
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = support_sizes[i - 1]
UpperCAmelCase__ = S[s : s + size][start_token_masks[s : s + size]]
UpperCAmelCase__ = S[s : s + size][end_token_masks[s : s + size]]
UpperCAmelCase__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCAmelCase__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCAmelCase__ = torch.vstack((p_starts, p_start) )
UpperCAmelCase__ = torch.vstack((p_ends, p_end) )
else:
UpperCAmelCase__ = p_start
UpperCAmelCase__ = p_end
return p_starts, p_ends
| 603 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : List[str] =logging.get_logger(__name__)
_A : int ={
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
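# RoBERTa reuses BERT's architecture, so the configuration mirrors BertConfig;
# the ONNX config below only declares the dynamic input axes.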
class _lowercase ( _lowercase ):
a = """roberta"""
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any]=50_265 , UpperCamelCase__: Any=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[Any]=3_072 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: int=0.1 , UpperCamelCase__: str=0.1 , UpperCamelCase__: Optional[int]=512 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[Any]=1e-12 , UpperCamelCase__: Union[str, Any]=1 , UpperCamelCase__: str=0 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]="absolute" , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Union[str, Any] , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : Tuple = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : Tuple = type_vocab_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : str = position_embedding_type
lowerCamelCase__ : Union[str, Any] = use_cache
lowerCamelCase__ : Any = classifier_dropout
class _lowercase ( _lowercase ):
@property
def lowerCamelCase_ ( self: Tuple ):
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 631 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : Tuple = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
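# Heavy submodules are registered lazily: each optional dependency (vision,
# torch, TensorFlow) only extends the import structure when it is installed.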
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["""MobileViTFeatureExtractor"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
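# The fast tests use the tiny diffusers/consistency-models-test UNets; the
# @slow class below runs the full ImageNet-64 consistency model checkpoints,
# including fp16 and flash-attention variants.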
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
lowercase = ConsistencyModelPipeline
lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def lowerCamelCase_ ( self : Any , __magic_name__ : Tuple=False ):
"""simple docstring"""
if class_cond:
UpperCamelCase = self.dummy_cond_unet
else:
UpperCamelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(__magic_name__ )
else:
UpperCamelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [2_2, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = ConsistencyModelPipeline(**__magic_name__ )
UpperCamelCase = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components(class_cond=__magic_name__ )
UpperCamelCase = ConsistencyModelPipeline(**__magic_name__ )
UpperCamelCase = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase = 0
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = ConsistencyModelPipeline(**__magic_name__ )
UpperCamelCase = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase = 1
UpperCamelCase = None
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components(class_cond=__magic_name__ )
UpperCamelCase = ConsistencyModelPipeline(**__magic_name__ )
UpperCamelCase = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase = 1
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] , __magic_name__ : str=0 , __magic_name__ : List[Any]=False , __magic_name__ : List[str]="cpu" , __magic_name__ : List[Any]=torch.floataa , __magic_name__ : List[str]=(1, 3, 6_4, 6_4) ):
"""simple docstring"""
UpperCamelCase = torch.manual_seed(__magic_name__ )
UpperCamelCase = {
"""num_inference_steps""": None,
"""timesteps""": [2_2, 0],
"""class_labels""": 0,
"""generator""": generator,
"""output_type""": """np""",
}
if get_fixed_latents:
UpperCamelCase = self.get_fixed_latents(seed=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ , shape=__magic_name__ )
UpperCamelCase = latents
return inputs
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : List[str]=0 , __magic_name__ : str="cpu" , __magic_name__ : Dict=torch.floataa , __magic_name__ : Tuple=(1, 3, 6_4, 6_4) ):
"""simple docstring"""
if type(__magic_name__ ) == str:
UpperCamelCase = torch.device(__magic_name__ )
UpperCamelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase = randn_tensor(__magic_name__ , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
return latents
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCamelCase = ConsistencyModelPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(torch_device=__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCamelCase = ConsistencyModelPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(torch_device=__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_inputs()
UpperCamelCase = 1
UpperCamelCase = None
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCamelCase = ConsistencyModelPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
        pipe.to(torch_device=__magic_name__ , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_inputs(get_fixed_latents=__magic_name__ , device=__magic_name__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__magic_name__ , enable_math=__magic_name__ , enable_mem_efficient=__magic_name__ ):
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCamelCase = ConsistencyModelPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
        pipe.to(torch_device=__magic_name__ , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = self.get_inputs(get_fixed_latents=__magic_name__ , device=__magic_name__ )
UpperCamelCase = 1
UpperCamelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__magic_name__ , enable_math=__magic_name__ , enable_mem_efficient=__magic_name__ ):
UpperCamelCase = pipe(**__magic_name__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 386 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
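# Illustrative sketch (not part of the original module): the special-token
# layout produced by `build_inputs_with_special_tokens` above, written out on
# plain lists. The integer ids are made-up placeholders; only the [CLS]/[SEP]
# framing mirrors the method.
def _demo_special_tokens_layout():
    cls_id, sep_id = 65, 66  # hypothetical ids, for illustration only
    ids_a, ids_b = [10, 11, 12], [20, 21]
    assert [cls_id] + ids_a + [sep_id] == [65, 10, 11, 12, 66]  # single sequence
    assert [cls_id] + ids_a + [sep_id] + ids_b + [sep_id] == [65, 10, 11, 12, 66, 20, 21, 66]  # sequence pair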
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt"):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self , index ):
        '''simple docstring'''
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( summary_ids : List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info( folder_path : str):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json"""))
def save_json( content , path , indent=4 , **json_dump_kwargs):
    with open(path , """w""") as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)
def load_json( path ):
    with open(path) as f:
        return json.load(f)
def get_git_info( ):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap( f : Callable , x : Iterable):
    return list(map(f , x))
def pickle_save( obj , path ):
    with open(path , """wb""") as f:
        return pickle.dump(obj , f)
def normalize_answer( s : str):
    def remove_articles( text ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text)
    def white_space_fix( text ):
        return " ".join(text.split())
    def remove_punc( text ):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower( text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match( output_lns : List[str] , reference_lns : List[str]):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("""rag""")
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info("""config doesn't have a `{}` attribute""".format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
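# Illustrative usage sketch (not part of the original module): exercising the
# SQuAD-style metrics defined above on toy strings. The strings and values are
# hypothetical; only the functions defined in this file are assumed.
def _demo_metrics():
    assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
    assert exact_match_score("a quick fox" , "The Quick Fox")  # articles, case and punctuation are normalized away
    assert abs(f1_score("quick brown fox" , "quick fox") - 0.8) < 1e-6  # precision 2/3, recall 1
    assert calculate_exact_match(["quick fox"] , ["the quick fox"]) == {"em": 1.0}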
| 27 | 1 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """bertabs"""
    def __init__( self , vocab_size=3_05_22 , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
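# Illustrative usage sketch (not part of the original module): because the
# class subclasses `PretrainedConfig`, JSON (de)serialization comes for free.
# The values below are arbitrary examples.
def _demo_bertabs_config():
    cfg = BertAbsConfig(enc_layers=4 , dec_layers=4)
    assert cfg.enc_layers == 4 and cfg.dec_layers == 4
    print(cfg.to_json_string())  # inherited from PretrainedConfig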
| 100 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE : Any = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_SCREAMING_SNAKE_CASE : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_SCREAMING_SNAKE_CASE : Any = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
_SCREAMING_SNAKE_CASE : Any = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , __snake_case )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__snake_case , __snake_case )
def UpperCAmelCase_ ( self , **__snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def UpperCAmelCase_ ( self , **__snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def UpperCAmelCase_ ( self , **__snake_case ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__snake_case )
_SCREAMING_SNAKE_CASE : Any = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __snake_case )
self.assertIsInstance(processor_fast.tokenizer , __snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __snake_case )
self.assertIsInstance(processor_fast.image_processor , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : List[Any] = image_processor(__snake_case , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Optional[Any] = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_SCREAMING_SNAKE_CASE : str = """lower newer"""
_SCREAMING_SNAKE_CASE : Any = processor(text=__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_SCREAMING_SNAKE_CASE : Any = """lower newer"""
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : List[str] = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[Any] = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__snake_case , visual_prompt=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE : int = processor.batch_decode(__snake_case )
_SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
| 533 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap( checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R'.*sequential.(\d+).*'
    text_projection_pattern = R'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
__snake_case : List[Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
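# Illustrative sketch (not part of the original script): the qkv split in
# `rename_state_dict` slices a fused (3*dim, ...) tensor into equal thirds.
# The same result can be obtained with `torch.chunk`; the toy tensor below is
# hypothetical and only mirrors the slicing logic above.
def _demo_qkv_split():
    mixed_qkv = torch.arange(12.0).reshape(6, 2)  # pretend dim = 2, so 3 * dim = 6 rows
    qkv_dim = mixed_qkv.size(0) // 3
    query, key, value = mixed_qkv[:qkv_dim], mixed_qkv[qkv_dim : qkv_dim * 2], mixed_qkv[qkv_dim * 2 :]
    q2, k2, v2 = torch.chunk(mixed_qkv, 3, dim=0)
    assert torch.equal(query, q2) and torch.equal(key, k2) and torch.equal(value, v2)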
| 717 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.get_dummy_input()
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: Dict=False , ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = 4
__lowerCAmelCase : List[str] = 32
__lowerCAmelCase : str = (32, 32)
__lowerCAmelCase : int = torch.manual_seed(0)
__lowerCAmelCase : Dict = torch.device(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = (batch_size, num_channels) + sizes
__lowerCAmelCase : Dict = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = {"hidden_states": hidden_states}
if include_temb:
__lowerCAmelCase : List[str] = 128
__lowerCAmelCase : Dict = randn_tensor((batch_size, temb_channels) , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE)
if include_res_hidden_states_tuple:
__lowerCAmelCase : Dict = torch.manual_seed(1)
__lowerCAmelCase : int = (randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE),)
if include_encoder_hidden_states:
__lowerCAmelCase : int = floats_tensor((batch_size, 32, 32)).to(_SCREAMING_SNAKE_CASE)
if include_skip_sample:
__lowerCAmelCase : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE)
return dummy_input
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
__lowerCAmelCase : Tuple = 32
if self.block_type == "mid":
init_dict.pop("out_channels")
__lowerCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.prepare_init_args_and_inputs_for_common()
__lowerCAmelCase : Dict = self.block_class(**_SCREAMING_SNAKE_CASE)
unet_block.to(_SCREAMING_SNAKE_CASE)
unet_block.eval()
with torch.no_grad():
__lowerCAmelCase : int = unet_block(**_SCREAMING_SNAKE_CASE)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = output[0]
self.assertEqual(output.shape , self.output_shape)
__lowerCAmelCase : str = output[0, -1, -3:, -3:]
__lowerCAmelCase : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
assert torch_all_close(output_slice.flatten() , _SCREAMING_SNAKE_CASE , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
__lowerCAmelCase : str = self.block_class(**_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.train()
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Union[str, Any] = output[0]
__lowerCAmelCase : List[Any] = torch.device(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = randn_tensor(output.shape , device=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = torch.nn.functional.mse_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
loss.backward()
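# Illustrative sketch (not part of the original file): a concrete test case
# would combine the mixin above with `unittest.TestCase`, pin down the block
# class and block type, and supply an expected output slice. `SomeDownBlock2D`
# is a hypothetical placeholder for a real diffusers block class.
#
# class SomeDownBlockTests(A__, unittest.TestCase):
#     block_class = SomeDownBlock2D
#     block_type = "down"  # selects the (4, 32, 16, 16) output shape defined above
#     # the mixin's output test then compares output[0, -1, -3:, -3:] against
#     # a nine-value expected slice with atol=5e-3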
| 615 | 0 |
'''simple docstring'''
def find_minimum_change( denominations , value ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the answer list
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
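# Illustrative usage sketch (not part of the original script): the greedy
# strategy assumes a canonical coin system such as the Indian denominations
# above; for arbitrary denominations greedy choice is not guaranteed optimal.
def _demo_find_minimum_change():
    denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
    assert find_minimum_change(denominations, 987) == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]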
| 368 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path , output_path , opset , fp16 = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
a__ : List[str] = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
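# Illustrative follow-up sketch (not part of the original script): loading the
# exported decoder with onnxruntime. This assumes onnxruntime is installed and
# that the exported graph exposes `latent_sample` as an input, matching the
# `ordered_input_names` passed to the export above; the path and latent shape
# are hypothetical examples.
#
# import numpy as np
# import onnxruntime as ort
#
# sess = ort.InferenceSession("path/to/output/vae_decoder/model.onnx")  # hypothetical path
# latents = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 = typical latent channel count
# (sample,) = sess.run(None, {"latent_sample": latents})
# print(sample.shape)  # decoded image tensor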
| 368 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
def make_batched(videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}' )
class _UpperCAmelCase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = None , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**a_ )
_UpperCAmelCase : Any = size if size is not None else {"shortest_edge": 2_2_4}
_UpperCAmelCase : List[str] = get_size_dict(a_ , default_to_square=a_ )
_UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_UpperCAmelCase : List[str] = get_size_dict(a_ , param_name='''crop_size''' )
_UpperCAmelCase : int = do_resize
_UpperCAmelCase : int = size
_UpperCAmelCase : Tuple = do_center_crop
_UpperCAmelCase : Dict = crop_size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : Dict = do_rescale
_UpperCAmelCase : Optional[int] = rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize
_UpperCAmelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> np.ndarray:
_UpperCAmelCase : Dict = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" in size:
_UpperCAmelCase : int = get_resize_output_image_size(a_ , size['''shortest_edge'''] , default_to_square=a_ )
elif "height" in size and "width" in size:
_UpperCAmelCase : Union[str, Any] = (size["height"], size["width"])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
_UpperCAmelCase : Optional[int] = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(a_ , size=(size['''height'''], size['''width''']) , data_format=a_ , **a_ )
def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> Optional[Any]:
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def __lowerCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase : Union[str, Any] = to_numpy_array(a_ )
if do_resize:
_UpperCAmelCase : str = self.resize(image=a_ , size=a_ , resample=a_ )
if do_center_crop:
_UpperCAmelCase : Dict = self.center_crop(a_ , size=a_ )
if do_rescale:
_UpperCAmelCase : Tuple = self.rescale(image=a_ , scale=a_ )
if do_normalize:
_UpperCAmelCase : str = self.normalize(image=a_ , mean=a_ , std=a_ )
_UpperCAmelCase : Union[str, Any] = to_channel_dimension_format(a_ , a_ )
return image
def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
_UpperCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[Any] = size if size is not None else self.size
_UpperCAmelCase : Optional[int] = get_size_dict(a_ , default_to_square=a_ )
_UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[str] = get_size_dict(a_ , param_name='''crop_size''' )
if not valid_images(a_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_UpperCAmelCase : str = make_batched(a_ )
_UpperCAmelCase : List[str] = [
[
self._preprocess_image(
image=a_ , do_resize=a_ , size=a_ , resample=a_ , do_center_crop=a_ , crop_size=a_ , do_rescale=a_ , rescale_factor=a_ , do_normalize=a_ , image_mean=a_ , image_std=a_ , data_format=a_ , )
for img in video
]
for video in videos
]
_UpperCAmelCase : Tuple = {"pixel_values": videos}
return BatchFeature(data=a_ , tensor_type=a_ )
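# Illustrative sketch (not part of the original module): `make_batched` accepts
# a single frame, a list of frames (one video), or a list of videos, and always
# normalizes to List[List[image]]. The arrays below are hypothetical dummy frames.
def _demo_make_batched():
    frame = np.zeros((3, 8, 8), dtype=np.uint8)
    assert len(make_batched(frame)) == 1               # one video with one frame
    assert len(make_batched([frame, frame])) == 1      # one video with two frames
    assert len(make_batched([[frame], [frame]])) == 2  # two videos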
| 506 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens (or None if the list is too short)."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    '''simple docstring'''
    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator , jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a , code_b ) -> float:
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset , jaccard_threshold = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
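# Illustrative sketch (not part of the original module): MinHash provides an
# estimate of the exact token-level Jaccard similarity that `jaccard_similarity`
# computes. The token lists below are hypothetical examples, made long enough
# that `get_min_hash` does not bail out via MIN_NUM_TOKENS.
def _demo_minhash_vs_exact():
    tokens_a = ["def", "add", "a", "b", "return", "a", "b", "sum", "of", "two", "ints"]
    tokens_b = ["def", "add", "x", "y", "return", "x", "y", "sum", "of", "two", "ints"]
    m_a, m_b = get_min_hash(tokens_a), get_min_hash(tokens_b)
    exact = len(set(tokens_a) & set(tokens_b)) / len(set(tokens_a) | set(tokens_b))
    print(exact, m_a.jaccard(m_b))  # the MinHash estimate should be close to the exact value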
| 372 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : Optional[int] = """Hello world! cécé herlolip"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = FairseqRobertaModel.from_pretrained(_lowercase )
roberta.eval() # disable dropout
UpperCAmelCase : int = roberta.model.encoder.sentence_encoder
UpperCAmelCase : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
UpperCAmelCase : Tuple = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , _lowercase )
UpperCAmelCase : List[str] = XLMRobertaXLForSequenceClassification(_lowercase ) if classification_head else XLMRobertaXLForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : str = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase : Any = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase : List[str] = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase : Any = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase : str = model.roberta.encoder.layer[i]
UpperCAmelCase : Tuple = roberta_sent_encoder.layers[i]
UpperCAmelCase : Optional[Any] = layer.attention
UpperCAmelCase : Optional[int] = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase : Optional[int] = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase : int = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase : List[str] = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase : str = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase : Any = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase : str = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase : List[Any] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase : Any = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCAmelCase : int = roberta_layer.final_layer_norm.weight
UpperCAmelCase : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase : List[Any] = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        UpperCAmelCase : List[Any] = roberta_layer.fc1.weight
        UpperCAmelCase : List[str] = roberta_layer.fc1.bias
# output
UpperCAmelCase : Dict = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        UpperCAmelCase : Dict = roberta_layer.fc2.weight
        UpperCAmelCase : List[str] = roberta_layer.fc2.bias
# end of layer
if classification_head:
UpperCAmelCase : Union[str, Any] = roberta.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : Any = roberta.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase : Tuple = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : Optional[Any] = roberta.model.encoder.lm_head.weight
UpperCAmelCase : str = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(_lowercase).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
a : int = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 701 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
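# Note on the pattern above: with _LazyModule, importing this package stays cheap.
# The heavy torch/TF submodules registered in _import_structure are only imported
# the first time one of their attributes is actually accessed.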
| 80 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_50, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 456 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subparsers = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subparsers, parents=[parent_parser])
    update_command_parser(subparsers, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 44 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    def __init__(self, tree) -> None:
        self.tree = tree

    def depth_first_search(self, node) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
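# Minimal usage sketch (hypothetical tree, not part of the original snippet):
#   tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
#   assert sum(BinaryTreeNodeSum(tree)) == 12  # 10 + 5 - 3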
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 70 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/deit-base-distilled-patch16-224''': (
        '''https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
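# Note: with the defaults above (image_size=224, patch_size=16), a DeiT encoder
# sees (224 // 16) ** 2 = 196 patches per image, plus the class token(s).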
| 706 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
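For example, with reference "the cat sat on the mat" (N = 6 words) and prediction
"the cat sit on mat" (S = 1, D = 1, I = 0), WER = (1 + 1 + 0) / 6 ≈ 0.33.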
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 320 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
def __init__( self : Optional[int] , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : Optional[int]=7_6_8 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : str=3_0_7_2 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Tuple=1e-5 , lowerCAmelCase__ : Tuple="group" , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase__ : str=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ : Tuple=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : int=1_2_8 , lowerCAmelCase__ : Union[str, Any]=1_6 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : List[str]=0.05 , lowerCAmelCase__ : List[str]=1_0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[Any]=1_0 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Tuple=3_2_0 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[Any]=1_0_0 , lowerCAmelCase__ : Any=2_5_6 , lowerCAmelCase__ : List[str]=2_5_6 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Union[str, Any]="mean" , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=2_5_6 , lowerCAmelCase__ : Tuple=8_0 , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : Dict=0.5 , **lowerCAmelCase__ : List[Any] , ) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Union[str, Any] = feat_extract_norm
_UpperCAmelCase : Dict = feat_extract_activation
_UpperCAmelCase : Tuple = list(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = list(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = list(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = conv_bias
_UpperCAmelCase : int = num_conv_pos_embeddings
_UpperCAmelCase : List[str] = num_conv_pos_embedding_groups
_UpperCAmelCase : Optional[int] = len(self.conv_dim )
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : List[str] = hidden_dropout
_UpperCAmelCase : Tuple = attention_dropout
_UpperCAmelCase : Dict = activation_dropout
_UpperCAmelCase : List[str] = feat_proj_dropout
_UpperCAmelCase : str = final_dropout
_UpperCAmelCase : Optional[Any] = layerdrop
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : List[str] = num_ctc_classes
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : Optional[int] = do_stable_layer_norm
_UpperCAmelCase : str = use_weighted_layer_sum
_UpperCAmelCase : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : List[str] = apply_spec_augment
_UpperCAmelCase : Optional[Any] = mask_time_prob
_UpperCAmelCase : Dict = mask_time_length
_UpperCAmelCase : str = mask_time_min_masks
_UpperCAmelCase : Dict = mask_feature_prob
_UpperCAmelCase : int = mask_feature_length
_UpperCAmelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : Optional[int] = num_codevectors_per_group
_UpperCAmelCase : Dict = num_codevector_groups
_UpperCAmelCase : Optional[int] = contrastive_logits_temperature
_UpperCAmelCase : Optional[Any] = feat_quantizer_dropout
_UpperCAmelCase : Dict = num_negatives
_UpperCAmelCase : Union[str, Any] = codevector_dim
_UpperCAmelCase : Dict = proj_codevector_dim
_UpperCAmelCase : Optional[Any] = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Any = ctc_loss_reduction
_UpperCAmelCase : List[str] = ctc_zero_infinity
# pretraining loss
_UpperCAmelCase : Optional[Any] = replace_prob
    @property
    def inputs_to_logits_ratio(self) -> int:
        return functools.reduce(operator.mul, self.conv_stride, 1)
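# The property above multiplies all convolutional strides together, i.e. the total
# downsampling factor from raw audio samples to feature frames; with the default
# strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 samples per frame.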
| 494 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , lowerCAmelCase__ : Dict=5_0_2_6_5 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=8 , lowerCAmelCase__ : str=2_0_4_8 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : List[str]=8 , lowerCAmelCase__ : Optional[int]=2_0_4_8 , lowerCAmelCase__ : List[Any]=1_6 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Any=5_1_2 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : str=2 , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : Optional[Any] = d_model
_UpperCAmelCase : List[str] = encoder_ffn_dim
_UpperCAmelCase : Union[str, Any] = encoder_layers
_UpperCAmelCase : List[Any] = encoder_attention_heads
_UpperCAmelCase : Tuple = decoder_ffn_dim
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Union[str, Any] = decoder_attention_heads
_UpperCAmelCase : List[Any] = dropout
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : Union[str, Any] = activation_function
_UpperCAmelCase : Any = init_std
_UpperCAmelCase : Optional[Any] = encoder_layerdrop
_UpperCAmelCase : Tuple = decoder_layerdrop
_UpperCAmelCase : List[Any] = use_cache
_UpperCAmelCase : str = encoder_layers
_UpperCAmelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_UpperCAmelCase : Tuple = {0: "batch"}
_UpperCAmelCase : Union[str, Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_UpperCAmelCase : Dict = {0: "batch", 1: "decoder_sequence"}
_UpperCAmelCase : int = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_UpperCAmelCase : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.num_layers
for i in range(lowerCAmelCase__ ):
_UpperCAmelCase : str = {0: "batch", 2: "past_sequence + sequence"}
_UpperCAmelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
else:
_UpperCAmelCase : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _lowerCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase : Any = super().outputs
else:
_UpperCAmelCase : Union[str, Any] = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase : int = self.num_layers
for i in range(lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
_UpperCAmelCase : str = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
_UpperCAmelCase : Any = seq_length if not self.use_past else 1
_UpperCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_UpperCAmelCase : Union[str, Any] = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase : Dict = common_inputs["input_ids"].shape
_UpperCAmelCase : Optional[Any] = common_inputs["decoder_input_ids"].shape[1]
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.num_attention_heads
_UpperCAmelCase : List[str] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase : Tuple = decoder_seq_length + 3
_UpperCAmelCase : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_UpperCAmelCase : Union[str, Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
_UpperCAmelCase : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_UpperCAmelCase , _UpperCAmelCase : str = self.num_layers
_UpperCAmelCase : Optional[int] = min(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
_UpperCAmelCase : List[Any] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
_UpperCAmelCase : Tuple = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase : Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_UpperCAmelCase : Any = seqlen + 2
_UpperCAmelCase , _UpperCAmelCase : Any = self.num_layers
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.num_attention_heads
_UpperCAmelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase : List[str] = common_inputs["attention_mask"].dtype
_UpperCAmelCase : Tuple = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
_UpperCAmelCase : Any = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase : str = tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase : Any = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_UpperCAmelCase : Tuple = dict(tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return common_inputs
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
elif self.task == "causal-lm":
_UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
else:
_UpperCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
return common_inputs
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase : List[Any] = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
_UpperCAmelCase : List[Any] = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
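# Shape convention used for each dummy cached key/value tensor generated above:
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).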
| 494 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
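# Note on _build_conversation_input_ids above: each turn is encoded without special
# tokens and followed by eos_token_id; when the running history exceeds
# model_max_length, only the most recent tokens are kept (left truncation).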
| 264 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
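# Sanity check (approximate values): for edge = 1,
#   surface area = 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457
#   volume       = (15 + 7 * sqrt(5)) / 4      ≈ 7.6631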
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 24 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment, total_number, level, current_list, total_list) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
__magic_name__ = 4
__magic_name__ = 2
__magic_name__ = generate_all_combinations(n, k)
print_all_state(total_list)
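# Expected output for n = 4, k = 2 (all C(4, 2) = 6 combinations, one per line):
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4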
| 250 | 0 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
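# Complexity note: count_negatives_binary_search does O(rows * log(cols)) work per
# grid (one binary search over a shrinking prefix per row), while the brute-force
# scans are O(rows * cols); this matches the benchmark timings recorded above.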
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 717 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : str = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : List[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Any = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Any = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : int = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : str = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Any = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Any = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : str = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : int = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : List[str] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[int] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : str = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : str = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[int] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
class lowerCAmelCase ( metaclass=__UpperCAmelCase ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
requires_backends(self , ["sentencepiece"] )
| 493 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
__a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__a = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
def UpperCAmelCase ( self ):
_UpperCAmelCase = ViTMSNModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
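
# We will verify our results on an image of cute cats (a standard COCO test fixture).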
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image

@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| code_codestyle: 518 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
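
# Each README_* constant below is a small markdown document that exercises one
# validation rule; the matching EXPECTED_ERROR_* constant is the message it should raise.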
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
a = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
a = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
a = ""
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
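
# The same scenarios are repeated below through ReadMe.from_readme, which parses the
# README from a real file on disk instead of an in-memory string.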
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| style_context_codestyle: 518 | label: 1 |
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
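
    # The dummy modules below are deliberately tiny so that the fast tests run
    # on CPU in a few seconds.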
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| code_codestyle: 716 |
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
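
# Helpers for the tiling scheme: the input image is processed in overlapping tiles,
# each tile is upscaled independently, and a linear-ramp transparency mask blends the
# overlap regions when the upscaled tiles are pasted back into the final image.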
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    # Rounds n down to the nearest multiple of d (unused in this excerpt).
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        # Crop the tile for this grid cell, then widen it by the overlap border.
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        # Run the parent upscale pipeline on the prepared tile.
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Tiles on the image border keep a hard edge on that side.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,  # name assumed; unused below
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
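
# Example driver: loads the x4 upscaler checkpoint and upscales a local image,
# saving intermediate progress after each tile.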
def main():
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| style_context_codestyle: 86 | label: 0 |
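
A rough sketch of how rows like the two above could be consumed with the `datasets` library; the dataset ID "user/code-style-pairs" is a placeholder, not the real repository name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder ID
row = ds[0]
# Each row pairs a code sample and a style context with their style indices and a label.
print(row["label"], len(row["code"]))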