code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from jiwer import compute_measures
import datasets
__lowercase :Optional[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__lowercase :Union[str, Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
__lowercase :Optional[Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : Union[str, Any] ) ->Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self : Optional[int] , a : Tuple=None , a : List[str]=None , a : Union[str, Any]=False ) ->int:
if concatenate_texts:
return compute_measures(a , a )["wer"]
else:
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for prediction, reference in zip(a , a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = compute_measures(a , a )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowercase :Any = logging.get_logger(__name__)
__lowercase :Union[str, Any] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "imagegpt"
snake_case_ = ["past_key_values"]
snake_case_ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , a : Optional[Any]=5_12 + 1 , a : Any=32 * 32 , a : Tuple=5_12 , a : Dict=24 , a : Dict=8 , a : str=None , a : Dict="quick_gelu" , a : str=0.1 , a : Union[str, Any]=0.1 , a : List[str]=0.1 , a : Dict=1E-5 , a : Tuple=0.02 , a : str=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=False , a : Union[str, Any]=False , **a : Tuple , ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE__ : List[str] = n_embd
SCREAMING_SNAKE_CASE__ : List[str] = n_layer
SCREAMING_SNAKE_CASE__ : Dict = n_head
SCREAMING_SNAKE_CASE__ : List[str] = n_inner
SCREAMING_SNAKE_CASE__ : List[str] = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = resid_pdrop
SCREAMING_SNAKE_CASE__ : Tuple = embd_pdrop
SCREAMING_SNAKE_CASE__ : Dict = attn_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = scale_attn_weights
SCREAMING_SNAKE_CASE__ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE__ : int = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE__ : Dict = tie_word_embeddings
super().__init__(tie_word_embeddings=a , **a )
class _a ( lowercase__ ):
"""simple docstring"""
@property
def A_ ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def A_ ( self : List[Any] , a : "FeatureExtractionMixin" , a : int = 1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , a : int = 3 , a : int = 32 , a : int = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_images(a , a , a , a )
SCREAMING_SNAKE_CASE__ : Any = dict(preprocessor(images=a , return_tensors=a ) )
return inputs
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowercase :Any = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , *a : List[Any] , **a : Optional[int] ) ->None:
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , a , )
super().__init__(*a , **a )
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :str = logging.get_logger(__name__)
__lowercase :List[str] = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "visual_bert"
def __init__( self : Union[str, Any] , a : int=3_05_22 , a : Dict=7_68 , a : Optional[int]=5_12 , a : int=12 , a : Union[str, Any]=12 , a : List[str]=30_72 , a : int="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : Union[str, Any]=5_12 , a : Optional[int]=2 , a : Tuple=0.02 , a : Optional[int]=1E-12 , a : Tuple=False , a : Any=True , a : Tuple=1 , a : Any=0 , a : Union[str, Any]=2 , **a : Optional[int] , ) ->Union[str, Any]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = bypass_transformer
SCREAMING_SNAKE_CASE__ : Optional[int] = special_visual_initialize
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
from ... import PretrainedConfig
__lowercase :Any = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
snake_case_ = "nezha"
def __init__( self : Tuple , a : Any=2_11_28 , a : List[str]=7_68 , a : List[str]=12 , a : Any=12 , a : List[str]=30_72 , a : Optional[int]="gelu" , a : Tuple=0.1 , a : Optional[Any]=0.1 , a : Dict=5_12 , a : Dict=64 , a : Any=2 , a : Any=0.02 , a : Optional[Any]=1E-12 , a : str=0.1 , a : List[str]=0 , a : str=2 , a : Optional[Any]=3 , a : Tuple=True , **a : Optional[int] , ) ->List[str]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = max_relative_position
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = classifier_dropout
SCREAMING_SNAKE_CASE__ : List[str] = use_cache
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Dict = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__lowercase :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCAmelCase ( *_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : Dict=True , _lowerCamelCase : Dict=2 ):
'''simple docstring'''
from .. import __version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = take_from
SCREAMING_SNAKE_CASE__ : str = ()
if not isinstance(args[0] , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE__ : List[Any] = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
SCREAMING_SNAKE_CASE__ : List[str] = warning + " " if standard_warn else ""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = call_frame.filename
SCREAMING_SNAKE_CASE__ : Tuple = call_frame.lineno
SCREAMING_SNAKE_CASE__ : Optional[Any] = call_frame.function
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int = 100 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = n * (n + 1) * (2 * n + 1) / 6
SCREAMING_SNAKE_CASE__ : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"{solution() = }")
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
class _a ( lowercase__ , lowercase__ ):
"""simple docstring"""
snake_case_ = 1
@register_to_config
def __init__( self : Dict , a : int = 20_00 , a : float = 0.15 , a : float = 0.01 , a : float = 1348.0 , a : float = 1E-5 , a : int = 1 , ) ->int:
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ : Any = sigma_max
# setable values
SCREAMING_SNAKE_CASE__ : Optional[int] = None
self.set_sigmas(a , a , a , a )
def A_ ( self : Any , a : torch.FloatTensor , a : Optional[int] = None ) ->torch.FloatTensor:
return sample
def A_ ( self : Tuple , a : int , a : float = None , a : Union[str, torch.device] = None ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE__ : str = torch.linspace(1 , a , a , device=a )
def A_ ( self : Optional[Any] , a : int , a : float = None , a : float = None , a : float = None ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE__ : List[str] = sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE__ : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.exp(torch.linspace(math.log(a ) , math.log(a ) , a ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def A_ ( self : Optional[Any] , a : str , a : Tuple ) ->Dict:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def A_ ( self : Optional[int] , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : Optional[torch.Generator] = None , a : bool = True , ) ->Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
SCREAMING_SNAKE_CASE__ : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE__ : str = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE__ : List[Any] = timesteps.to(self.discrete_sigmas.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_adjacent_sigma(a , a ).to(sample.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros_like(a )
SCREAMING_SNAKE_CASE__ : List[str] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE__ : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ : Any = diffusion.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : Any = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE__ : Tuple = randn_tensor(
sample.shape , layout=sample.layout , generator=a , device=sample.device , dtype=sample.dtype )
SCREAMING_SNAKE_CASE__ : str = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE__ : str = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=a , prev_sample_mean=a )
def A_ ( self : Optional[Any] , a : torch.FloatTensor , a : torch.FloatTensor , a : Optional[torch.Generator] = None , a : bool = True , ) ->Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE__ : str = randn_tensor(sample.shape , layout=sample.layout , generator=a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE__ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ : Any = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE__ : Optional[int] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE__ : Tuple = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ : Tuple = step_size.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : List[str] = sample + step_size * model_output
SCREAMING_SNAKE_CASE__ : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def A_ ( self : Tuple , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ) ->torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
SCREAMING_SNAKE_CASE__ : Optional[int] = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE__ : Dict = self.discrete_sigmas.to(original_samples.device )[timesteps]
SCREAMING_SNAKE_CASE__ : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(a ) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE__ : int = noise + original_samples
return noisy_samples
def __len__( self : Dict ) ->Any:
return self.config.num_train_timesteps
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
from __future__ import annotations
import math
def UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = u
for i in range(1 , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = temp * (u - i)
return temp
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = int(input("enter the numbers of values: " ) )
SCREAMING_SNAKE_CASE__ : list[list[float]] = []
for _ in range(_lowerCamelCase ):
y.append([] )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
y[i].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
print("enter the values of parameters in a list: " )
SCREAMING_SNAKE_CASE__ : int = list(map(_lowerCamelCase , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = float(input() )
SCREAMING_SNAKE_CASE__ : List[str] = int(input("enter the value to interpolate: " ) )
SCREAMING_SNAKE_CASE__ : int = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , _lowerCamelCase ):
for j in range(n - i ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = y[j + 1][i - 1] - y[j][i - 1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = y[0][0]
for i in range(1 , _lowerCamelCase ):
summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AutoencoderKL
snake_case_ = "sample"
snake_case_ = 1e-2
@property
def A_ ( self : Union[str, Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : str = (32, 32)
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
return {"sample": image}
@property
def A_ ( self : Any ) ->Optional[Any]:
return (3, 32, 32)
@property
def A_ ( self : Any ) ->List[str]:
return (3, 32, 32)
def A_ ( self : str ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def A_ ( self : Optional[int] ) ->List[str]:
pass
def A_ ( self : List[Any] ) ->List[str]:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def A_ ( self : Dict ) ->int:
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_class(**a )
model.to(a )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE__ : List[str] = model(**a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE__ : Dict = torch.randn_like(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_class(**a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE__ : Tuple = model_a(**a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE__ : int = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
SCREAMING_SNAKE_CASE__ : Dict = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def A_ ( self : Dict ) ->List[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
SCREAMING_SNAKE_CASE__ : int = model.to(a )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE__ : List[str] = image.to(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a , sample_posterior=a , generator=a ).sample
SCREAMING_SNAKE_CASE__ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(a , a , rtol=1E-2 ) )
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Tuple , a : Union[str, Any] , a : Optional[int] ) ->int:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(a ) for s in shape] )}.npy"""
def A_ ( self : Union[str, Any] ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Dict , a : Any=0 , a : int=(4, 3, 5_12, 5_12) , a : Tuple=False ) ->Dict:
SCREAMING_SNAKE_CASE__ : str = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE__ : Any = torch.from_numpy(load_hf_numpy(self.get_file_format(a , a ) ) ).to(a ).to(a )
return image
def A_ ( self : Optional[int] , a : Optional[Any]="CompVis/stable-diffusion-v1-4" , a : str=False ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "fp16" if fpaa else None
SCREAMING_SNAKE_CASE__ : str = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoencoderKL.from_pretrained(
a , subfolder="vae" , torch_dtype=a , revision=a , )
model.to(a ).eval()
return model
def A_ ( self : Optional[int] , a : Optional[int]=0 ) ->List[Any]:
if torch_device == "mps":
return torch.manual_seed(a )
return torch.Generator(device=a ).manual_seed(a )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Optional[int] , a : str , a : Optional[Any] , a : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_sd_image(a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_generator(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(a , generator=a , sample_posterior=a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(a , a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : List[str] , a : List[str] , a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : int = self.get_sd_vae_model(fpaa=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_sd_image(a , fpaa=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_generator(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a , generator=a , sample_posterior=a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : int = torch.tensor(a )
assert torch_all_close(a , a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : List[Any] , a : Tuple , a : Optional[int] , a : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : Dict = self.get_sd_image(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(a , a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : str , a : int , a : int ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_image(a , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
SCREAMING_SNAKE_CASE__ : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(a )
assert torch_all_close(a , a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Any , a : Tuple , a : Any ) ->str:
SCREAMING_SNAKE_CASE__ : Any = self.get_sd_vae_model(fpaa=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_sd_image(a , shape=(3, 4, 64, 64) , fpaa=a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
SCREAMING_SNAKE_CASE__ : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(a )
assert torch_all_close(a , a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A_ ( self : Tuple , a : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_vae_model(fpaa=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_sd_image(a , shape=(3, 4, 64, 64) , fpaa=a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model.decode(a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a , a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A_ ( self : str , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : int = self.get_sd_image(a , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model.decode(a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model.decode(a ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a , a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A_ ( self : Any , a : int , a : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_sd_image(a )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_generator(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model.encode(a ).latent_dist
SCREAMING_SNAKE_CASE__ : Tuple = dist.sample(generator=a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE__ : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(a , a , atol=a )
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any , a : List[str] , a : List[str] , a : Union[str, Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = dataset
SCREAMING_SNAKE_CASE__ : List[Any] = process
SCREAMING_SNAKE_CASE__ : Optional[Any] = params
def __len__( self : int ) ->Optional[int]:
return len(self.dataset )
def __getitem__( self : Optional[Any] , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = self.dataset[i]
SCREAMING_SNAKE_CASE__ : str = self.process(a , **self.params )
return processed
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , a : Any , a : List[Any] , a : List[Any] , a : Any=None ) ->str:
SCREAMING_SNAKE_CASE__ : Any = loader
SCREAMING_SNAKE_CASE__ : Optional[int] = infer
SCREAMING_SNAKE_CASE__ : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : int = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Tuple = None
def __len__( self : Optional[int] ) ->Optional[Any]:
return len(self.loader )
def __iter__( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = iter(self.loader )
return self
def A_ ( self : Tuple ) ->Dict:
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE__ : int = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(a , a ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE__ : int = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE__ : Dict = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(a , a ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE__ : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE__ : Optional[int] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE__ : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE__ : int = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE__ : Any = self._loader_batch_data.__class__(a )
self._loader_batch_index += 1
return result
def A_ ( self : List[str] ) ->Optional[Any]:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE__ : Any = next(self.iterator )
SCREAMING_SNAKE_CASE__ : List[Any] = self.infer(a , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(a , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Tuple = processed
else:
SCREAMING_SNAKE_CASE__ : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE__ : Any = processed[key]
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE__ : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE__ : Tuple = processed
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : Union[str, Any] , a : Union[str, Any] , a : List[str] , a : Any=None ) ->List[Any]:
super().__init__(a , a , a )
def __iter__( self : Optional[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = iter(self.loader )
SCREAMING_SNAKE_CASE__ : Tuple = None
return self
def A_ ( self : Dict ) ->int:
if self.subiterator is None:
SCREAMING_SNAKE_CASE__ : str = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE__ : Optional[int] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE__ : str = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE__ : List[str] = next(self.subiterator )
return processed
class _a ( lowercase__ ):
"""simple docstring"""
def __iter__( self : str ) ->Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = iter(self.loader )
return self
def A_ ( self : List[Any] ) ->Optional[Any]:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE__ : List[str] = self.loader_batch_item()
SCREAMING_SNAKE_CASE__ : Optional[Any] = item.pop("is_last" )
accumulator.append(a )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE__ : Tuple = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(a , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Dict = processed
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE__ : List[str] = processed[key]
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : str = len(a )
else:
SCREAMING_SNAKE_CASE__ : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE__ : Any = observed_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed
SCREAMING_SNAKE_CASE__ : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE__ : Dict = self.loader_batch_item()
SCREAMING_SNAKE_CASE__ : str = item.pop("is_last" )
accumulator.append(a )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE__ : Tuple = processed
SCREAMING_SNAKE_CASE__ : Union[str, Any] = item.pop("is_last" )
accumulator.append(a )
return accumulator
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Dataset , a : str ) ->Any:
SCREAMING_SNAKE_CASE__ : List[str] = dataset
SCREAMING_SNAKE_CASE__ : Any = key
def __len__( self : str ) ->List[Any]:
return len(self.dataset )
def __getitem__( self : int , a : Any ) ->Union[str, Any]:
return self.dataset[i][self.key]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : Dataset , a : str , a : str ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset
SCREAMING_SNAKE_CASE__ : Optional[int] = keya
SCREAMING_SNAKE_CASE__ : List[Any] = keya
def __len__( self : Union[str, Any] ) ->int:
return len(self.dataset )
def __getitem__( self : Any , a : Union[str, Any] ) ->Any:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowercase :List[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _a :
"""simple docstring"""
def __init__( self : Tuple , a : Optional[int] , a : List[str]=16 , a : str=13 , a : Tuple=7 , a : Any=14 , a : List[str]=10 , a : Any=19 , a : List[Any]=5 , a : Tuple=4 , a : Tuple=True , a : Union[str, Any]=16 , a : Optional[Any]=2 , a : Union[str, Any]=4 , a : Dict=4 , a : Any="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Optional[int]=[1, 2, 3, 4, 5] , a : int=25 , a : Optional[int]=5 , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : int = d_model
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[str] = batch_size
SCREAMING_SNAKE_CASE__ : int = prediction_length
SCREAMING_SNAKE_CASE__ : Optional[int] = context_length
SCREAMING_SNAKE_CASE__ : Dict = cardinality
SCREAMING_SNAKE_CASE__ : int = num_time_features
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lags_sequence
SCREAMING_SNAKE_CASE__ : Any = embedding_dimension
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = context_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prediction_length + label_length
SCREAMING_SNAKE_CASE__ : List[str] = label_length
SCREAMING_SNAKE_CASE__ : List[str] = moving_average
SCREAMING_SNAKE_CASE__ : Dict = autocorrelation_factor
def A_ ( self : Union[str, Any] ) ->Optional[int]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A_ ( self : List[Any] , a : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = config.context_length + max(config.lags_sequence )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, _past_length] )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, config.prediction_length] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def A_ ( self : Any ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_autoformer_inputs_dict(a )
return config, inputs_dict
def A_ ( self : List[Any] ) ->Any:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : List[str] , a : Optional[Any] , a : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Any = AutoformerModel(config=a ).to(a ).eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(**a )
SCREAMING_SNAKE_CASE__ : int = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : str = model.get_encoder()
encoder.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoformerEncoder.from_pretrained(a ).to(a )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = model.create_network_inputs(**a )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder(inputs_embeds=a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
SCREAMING_SNAKE_CASE__ : Any = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
SCREAMING_SNAKE_CASE__ : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
SCREAMING_SNAKE_CASE__ : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = model.get_decoder()
decoder.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : List[str] = AutoformerDecoder.from_pretrained(a ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder(
trend=a , inputs_embeds=a , encoder_hidden_states=a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
snake_case_ = (AutoformerForPrediction,) if is_torch_available() else ()
snake_case_ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = AutoformerModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=a , has_text_modality=a )
def A_ ( self : Tuple ) ->str:
self.config_tester.run_common_tests()
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = model_class.from_pretrained(a , output_loading_info=a )
self.assertEqual(info["missing_keys"] , [] )
def A_ ( self : str ) ->Dict:
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a )
@unittest.skip(reason="Model has no tokens embeddings" )
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
pass
def A_ ( self : Dict ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = inspect.signature(getattr(a , "forward" ) )
# The main input is the name of the argument after `self`
SCREAMING_SNAKE_CASE__ : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , a )
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(a )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(a )] , a )
def A_ ( self : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(self.model_tester , "seq_length" , a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(self.model_tester , "decoder_seq_length" , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(self.model_tester , "encoder_seq_length" , a )
SCREAMING_SNAKE_CASE__ : List[str] = getattr(self.model_tester , "d_model" , a )
SCREAMING_SNAKE_CASE__ : str = getattr(self.model_tester , "num_attention_heads" , a )
SCREAMING_SNAKE_CASE__ : int = d_model // num_attention_heads
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
SCREAMING_SNAKE_CASE__ : Tuple = len(a )
SCREAMING_SNAKE_CASE__ : int = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(a , a )
# decoder attentions
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.decoder_attentions
self.assertIsInstance(a , (list, tuple) )
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
SCREAMING_SNAKE_CASE__ : int = outputs.cross_attentions
self.assertIsInstance(a , (list, tuple) )
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 2 , len(a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A_ ( self : int ) ->Optional[int]:
super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase ( _lowerCamelCase : Any="train-batch.pt" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCamelCase , repo_type="dataset" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : str ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_batch()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
SCREAMING_SNAKE_CASE__ : int = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=a )
self.assertTrue(torch.allclose(output[0, :3, :3] , a , atol=a ) )
def A_ ( self : int ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_batch("val-batch.pt" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=a )
self.assertTrue(torch.allclose(output[0, :3, :3] , a , atol=a ) )
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Tuple = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(a )
SCREAMING_SNAKE_CASE__ : List[str] = prepare_batch("val-batch.pt" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=a )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a , rtol=1E-1 ) )
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowercase :List[str] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowercase :Union[str, Any] = [0, 25, 50]
__lowercase :Optional[int] = [25, 50, 75]
__lowercase :Union[str, Any] = fuzz.membership.trimf(X, abca)
__lowercase :int = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowercase :int = np.ones(75)
__lowercase :List[str] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase :str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase :str = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__lowercase :List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowercase :str = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__lowercase :int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowercase :str = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__lowercase :List[str] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__lowercase :Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ): # noqa: E741
'''simple docstring'''
while r - l > 1:
SCREAMING_SNAKE_CASE__ : Any = (l + r) // 2
if v[m] >= key:
SCREAMING_SNAKE_CASE__ : Optional[int] = m
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = m # noqa: E741
return r
def UpperCAmelCase ( _lowerCamelCase : list[int] ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
return 0
SCREAMING_SNAKE_CASE__ : Dict = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : int = v[0]
for i in range(1 , len(_lowerCamelCase ) ):
if v[i] < tail[0]:
SCREAMING_SNAKE_CASE__ : List[Any] = v[i]
elif v[i] > tail[length - 1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = v[i]
length += 1
else:
SCREAMING_SNAKE_CASE__ : str = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _a ( enum.Enum ):
"""simple docstring"""
snake_case_ = 0
snake_case_ = 1
snake_case_ = 2
@add_end_docstrings(lowercase__ )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[Any] , *a : str , **a : str ) ->Tuple:
super().__init__(*a , **a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : int = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Any = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._sanitize_parameters(prefix=a , **self._forward_params )
SCREAMING_SNAKE_CASE__ : Optional[int] = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : Any = {**self._forward_params, **forward_params}
def A_ ( self : Tuple , a : Any=None , a : List[str]=None , a : Union[str, Any]=None , a : Optional[Any]=None , a : Any=None , a : Any=None , a : Any=None , a : List[Any]=None , **a : Optional[Any] , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Tuple = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : str = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(
a , padding=a , add_special_tokens=a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, 'hole']" )
SCREAMING_SNAKE_CASE__ : List[str] = handle_long_generation
preprocess_params.update(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
SCREAMING_SNAKE_CASE__ : str = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : List[str] = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.encode(a , add_special_tokens=a )
if len(a ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
SCREAMING_SNAKE_CASE__ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A_ ( self : Dict , *a : List[Any] , **a : Union[str, Any] ) ->Dict:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*a , **a )
def __call__( self : Optional[int] , a : Any , **a : Optional[Any] ) ->List[Any]:
return super().__call__(a , **a )
def A_ ( self : str , a : Tuple , a : Union[str, Any]="" , a : str=None , **a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Any = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : str = generate_kwargs["max_new_tokens"]
else:
SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
SCREAMING_SNAKE_CASE__ : List[str] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def A_ ( self : str , a : int , **a : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs["input_ids"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs.get("attention_mask" , a )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
else:
SCREAMING_SNAKE_CASE__ : Dict = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : str = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : str = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(input_ids=a , attention_mask=a , **a )
SCREAMING_SNAKE_CASE__ : Any = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Tuple = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : int = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def A_ ( self : Optional[int] , a : Optional[int] , a : Any=ReturnType.FULL_TEXT , a : Tuple=True ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs["generated_sequence"][0]
SCREAMING_SNAKE_CASE__ : List[str] = model_outputs["input_ids"]
SCREAMING_SNAKE_CASE__ : int = model_outputs["prompt_text"]
SCREAMING_SNAKE_CASE__ : str = generated_sequence.numpy().tolist()
SCREAMING_SNAKE_CASE__ : List[str] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : str = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
SCREAMING_SNAKE_CASE__ : int = self.tokenizer.decode(
a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
SCREAMING_SNAKE_CASE__ : Any = 0
else:
SCREAMING_SNAKE_CASE__ : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
if return_type == ReturnType.FULL_TEXT:
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text + text[prompt_length:]
else:
SCREAMING_SNAKE_CASE__ : Tuple = text[prompt_length:]
SCREAMING_SNAKE_CASE__ : Dict = {"generated_text": all_text}
records.append(a )
return records
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowercase :Optional[Any] = HUGGINGFACE_HUB_CACHE
__lowercase :Tuple = "config.json"
__lowercase :Optional[Any] = "diffusion_pytorch_model.bin"
__lowercase :int = "diffusion_flax_model.msgpack"
__lowercase :Dict = "model.onnx"
__lowercase :Optional[int] = "diffusion_pytorch_model.safetensors"
__lowercase :Union[str, Any] = "weights.pb"
__lowercase :Any = "https://huggingface.co"
__lowercase :List[str] = default_cache_path
__lowercase :Union[str, Any] = "diffusers_modules"
__lowercase :Optional[int] = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__lowercase :Optional[Any] = ["fp16", "non-ema"]
__lowercase :List[Any] = ".self_attn"
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_name.split("." )
if layer == "0":
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace("0" , "convolution1" )
elif layer == "1":
SCREAMING_SNAKE_CASE__ : int = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("3" , "convolution2" )
else:
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = r"\b\d{2}\b"
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.search(r"\d\.\d\d." , _lowerCamelCase ).group()
else:
SCREAMING_SNAKE_CASE__ : str = re.search(r"\d\.\d." , _lowerCamelCase ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace(_lowerCamelCase , "" )
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "intermediate_stages." + trimmed_name
else:
SCREAMING_SNAKE_CASE__ : str = old_name.replace(_lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE__ : str = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE__ : List[str] = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE__ : Dict = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = trimmed_name.replace("fc2" , "linear_out" )
SCREAMING_SNAKE_CASE__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE__ : Optional[int] = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
SCREAMING_SNAKE_CASE__ : int = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE__ : List[Any] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("norm" , "layernorm" )
SCREAMING_SNAKE_CASE__ : Dict = "efficientformer." + new_name
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "efficientformer.encoder." + new_name
return new_name
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = val
return checkpoint
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def UpperCAmelCase ( _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerConfig.from_json_file(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
SCREAMING_SNAKE_CASE__ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = 256
SCREAMING_SNAKE_CASE__ : int = 224
SCREAMING_SNAKE_CASE__ : Dict = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
SCREAMING_SNAKE_CASE__ : List[str] = processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
SCREAMING_SNAKE_CASE__ : str = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = outputs.logits
SCREAMING_SNAKE_CASE__ : Tuple = (1, 1_000)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ : int = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(_lowerCamelCase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
__lowercase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
__lowercase :List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
SCREAMING_SNAKE_CASE__ : Optional[int] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ : Optional[int] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ : Optional[int] = max(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCamelCase ) , b_binary.zfill(_lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowercase :Any = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] , *a : Any , **a : Optional[Any] ) ->None:
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , a , )
super().__init__(*a , **a )
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__lowercase :str = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowercase :Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str=100 , _lowerCamelCase : str=" " ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = text.split(_lowerCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )]
def UpperCAmelCase ( _lowerCamelCase : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(_lowerCamelCase ):
titles.append(title if title is not None else "" )
texts.append(_lowerCamelCase )
return {"title": titles, "text": texts}
def UpperCAmelCase ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt" )["input_ids"]
SCREAMING_SNAKE_CASE__ : Dict = ctx_encoder(input_ids.to(device=_lowerCamelCase ) , return_dict=_lowerCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE__ : Dict = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE__ : Tuple = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE__ : str = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE__ : List[str] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase ) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(_lowerCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase )
# And save the index
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(_lowerCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
default=str(Path(lowercase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
snake_case_ = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
snake_case_ = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
snake_case_ = field(
default=str(Path(lowercase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
default=lowercase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
snake_case_ = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__lowercase :str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__lowercase , __lowercase , __lowercase :List[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__lowercase :str = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 1 |
import operator as op
def UpperCAmelCase ( _lowerCamelCase : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = lambda _lowerCamelCase , _lowerCamelCase : int(x / y ) # noqa: E731 integer division operation
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(_lowerCamelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowerCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
else:
SCREAMING_SNAKE_CASE__ : Dict = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
SCREAMING_SNAKE_CASE__ : int = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
stack.append(
str(opr[x](int(_lowerCamelCase ) , int(_lowerCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
__lowercase :Tuple = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
import baseaa
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def UpperCAmelCase ( _lowerCamelCase : bytes ):
'''simple docstring'''
return baseaa.baadecode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
__lowercase :Optional[Any] = "Hello World!"
__lowercase :Any = baseaa_encode(test)
print(encoded)
__lowercase :Dict = baseaa_decode(encoded)
print(decoded)
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
from __future__ import annotations
from cmath import sqrt
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
SCREAMING_SNAKE_CASE__ : str = b * b - 4 * a * c
SCREAMING_SNAKE_CASE__ : int = (-b + sqrt(_lowerCamelCase )) / (2 * a)
SCREAMING_SNAKE_CASE__ : Dict = (-b - sqrt(_lowerCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ShapEPipeline
snake_case_ = ["prompt"]
snake_case_ = ["prompt"]
snake_case_ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def A_ ( self : Dict ) ->List[Any]:
return 32
@property
def A_ ( self : str ) ->Optional[int]:
return 32
@property
def A_ ( self : Tuple ) ->List[str]:
return self.time_input_dim * 4
@property
def A_ ( self : Any ) ->List[Any]:
return 8
@property
def A_ ( self : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def A_ ( self : Dict ) ->Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a )
@property
def A_ ( self : Optional[int] ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE__ : Any = PriorTransformer(**a )
return model
@property
def A_ ( self : Dict ) ->List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = ShapERenderer(**a )
return model
def A_ ( self : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_prior
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_renderer
SCREAMING_SNAKE_CASE__ : Tuple = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=a , clip_sample=a , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def A_ ( self : Tuple , a : str , a : str=0 ) ->Optional[Any]:
if str(a ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(a )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def A_ ( self : str ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = "cpu"
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**a )
SCREAMING_SNAKE_CASE__ : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe(**self.get_dummy_inputs(a ) )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
SCREAMING_SNAKE_CASE__ : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self : List[str] ) ->List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch_device == "cpu"
SCREAMING_SNAKE_CASE__ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a , relax_max_difference=a , )
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = self.pipeline_class(**a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : Dict = 2
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs(a )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a , num_images_per_prompt=a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : str ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Tuple ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
SCREAMING_SNAKE_CASE__ : Tuple = ShapEPipeline.from_pretrained("openai/shap-e" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
"a shark" , generator=a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a , a )
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Optional[Any] = logging.get_logger(__name__)
__lowercase :Dict = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "audio-spectrogram-transformer"
def __init__( self : str , a : Dict=7_68 , a : List[str]=12 , a : List[str]=12 , a : Optional[int]=30_72 , a : Dict="gelu" , a : str=0.0 , a : int=0.0 , a : int=0.02 , a : Optional[int]=1E-12 , a : Optional[int]=16 , a : Tuple=True , a : List[str]=10 , a : int=10 , a : int=10_24 , a : Optional[Any]=1_28 , **a : List[str] , ) ->List[Any]:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Tuple = qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frequency_stride
SCREAMING_SNAKE_CASE__ : Tuple = time_stride
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_mel_bins
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ : Dict = ""
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : str = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase ( _lowerCamelCase : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = dct.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ViTMSNConfig()
SCREAMING_SNAKE_CASE__ : Any = 1_000
SCREAMING_SNAKE_CASE__ : Optional[int] = "datasets/huggingface/label-files"
SCREAMING_SNAKE_CASE__ : Any = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase ) , "r" ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = 384
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_536
SCREAMING_SNAKE_CASE__ : List[Any] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : str = 1_024
SCREAMING_SNAKE_CASE__ : str = 4_096
SCREAMING_SNAKE_CASE__ : Dict = 24
SCREAMING_SNAKE_CASE__ : Optional[int] = 16
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = 7
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_024
SCREAMING_SNAKE_CASE__ : List[Any] = 4_096
SCREAMING_SNAKE_CASE__ : Optional[Any] = 24
SCREAMING_SNAKE_CASE__ : Tuple = 16
SCREAMING_SNAKE_CASE__ : int = 0.1
SCREAMING_SNAKE_CASE__ : str = ViTMSNModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["target_encoder"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , base_model=_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
SCREAMING_SNAKE_CASE__ : Dict = ViTImageProcessor(
size=config.image_size , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = image_processor(images=_lowerCamelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCamelCase , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowercase :Dict = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float ):
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
'''simple docstring'''
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowercase :Dict = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["input_values", "padding_mask"]
def __init__( self : Optional[Any] , a : int = 1 , a : int = 2_40_00 , a : float = 0.0 , a : float = None , a : float = None , **a : str , ) ->Optional[Any]:
super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a )
SCREAMING_SNAKE_CASE__ : Any = chunk_length_s
SCREAMING_SNAKE_CASE__ : Dict = overlap
@property
def A_ ( self : int ) ->Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A_ ( self : Tuple ) ->Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Tuple , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[bool, str, PaddingStrategy]] = None , a : Optional[bool] = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , ) ->BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bool(
isinstance(a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(a , np.ndarray ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ : Dict = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : str = [np.asarray(a ).T]
# verify inputs are valid
for idx, example in enumerate(a ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
SCREAMING_SNAKE_CASE__ : Any = min(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE__ : List[str] = int(np.floor(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
SCREAMING_SNAKE_CASE__ : List[Any] = max(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE__ : Dict = int(np.ceil(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE__ : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
SCREAMING_SNAKE_CASE__ : Any = "max_length"
else:
SCREAMING_SNAKE_CASE__ : Any = input_values
# normal padding on batch
if padded_inputs is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.pad(
a , max_length=a , truncation=a , padding=a , return_attention_mask=a , )
if padding:
SCREAMING_SNAKE_CASE__ : List[str] = padded_inputs.pop("attention_mask" )
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
SCREAMING_SNAKE_CASE__ : int = example[..., None]
input_values.append(example.T )
SCREAMING_SNAKE_CASE__ : str = input_values
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : str = padded_inputs.convert_to_tensors(a )
return padded_inputs
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
while a != 0:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = b % a, a
return b
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
SCREAMING_SNAKE_CASE__ : List[str] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = 1, 0, a
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = 0, 1, m
while va != 0:
SCREAMING_SNAKE_CASE__ : int = ua // va
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Any = logging.get_logger(__name__)
__lowercase :List[Any] = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def UpperCAmelCase ( _lowerCamelCase : List[Any] ):
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE__ : Optional[Any] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("encoder" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = k.replace(".attn" , ".self_attn" )
SCREAMING_SNAKE_CASE__ : Dict = k.replace("norm1" , "self_attn_layer_norm" )
SCREAMING_SNAKE_CASE__ : str = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
SCREAMING_SNAKE_CASE__ : Dict = k.replace("norm1" , "self_attn_layer_norm" )
SCREAMING_SNAKE_CASE__ : Tuple = k.replace("norm2" , "encoder_attn_layer_norm" )
SCREAMING_SNAKE_CASE__ : int = k.replace("norm3" , "final_layer_norm" )
return k
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
SCREAMING_SNAKE_CASE__ : Optional[int] = sd.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
SCREAMING_SNAKE_CASE__ : str = v
__lowercase :List[str] = ["START"]
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(_lowerCamelCase , map_location="cpu" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model["model"]
SCREAMING_SNAKE_CASE__ : List[str] = BlenderbotConfig.from_json_file(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = BlenderbotForConditionalGeneration(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = m.model.state_dict().keys()
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : List[Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
SCREAMING_SNAKE_CASE__ : List[str] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__lowercase :List[str] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__lowercase :int = True
from torch.cuda.amp import autocast
__lowercase :str = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Whether to log verbose messages or not."} , )
snake_case_ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
snake_case_ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
snake_case_ = field(
default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} )
def UpperCAmelCase ( _lowerCamelCase : ModelArguments , _lowerCamelCase : TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE__ : str = logging.WARNING
if model_args.verbose_logging:
SCREAMING_SNAKE_CASE__ : str = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
SCREAMING_SNAKE_CASE__ : List[str] = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
default=lowercase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
snake_case_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
snake_case_ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
snake_case_ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
snake_case_ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case_ = field(
default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = "longest"
snake_case_ = None
snake_case_ = None
def __call__( self : Any , a : List[Dict[str, Union[List[int], torch.Tensor]]] ) ->Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feature_extractor.pad(
a , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
SCREAMING_SNAKE_CASE__ : int = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
SCREAMING_SNAKE_CASE__ : List[Any] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
SCREAMING_SNAKE_CASE__ : int = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
SCREAMING_SNAKE_CASE__ : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=a , min_masks=2 , )
return batch
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *a : Dict , a : List[Any]=1 , a : Optional[Any]=0 , a : Tuple=1.0 , **a : Tuple ) ->Optional[int]:
super().__init__(*a , **a )
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : str = max_gumbel_temp
SCREAMING_SNAKE_CASE__ : str = min_gumbel_temp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = gumbel_temp_decay
def A_ ( self : Any , a : nn.Module , a : Dict[str, Union[torch.Tensor, Any]] ) ->torch.Tensor:
model.train()
SCREAMING_SNAKE_CASE__ : int = self._prepare_inputs(a )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE__ : Optional[int] = self.compute_loss(a , a )
else:
SCREAMING_SNAKE_CASE__ : List[str] = self.compute_loss(a , a )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE__ : Any = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE__ : Tuple = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(a ).backward()
elif self.use_apex:
with amp.scale_loss(a , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(a )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE__ : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE__ : Optional[int] = DatasetDict()
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE__ : str = DatasetDict()
SCREAMING_SNAKE_CASE__ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase : Any ):
# check that all files have the correct sampling rate
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
SCREAMING_SNAKE_CASE__ : Optional[Any] = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
SCREAMING_SNAKE_CASE__ : int = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase : Union[str, Any] ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
SCREAMING_SNAKE_CASE__ : int = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
SCREAMING_SNAKE_CASE__ : Any = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaForPreTraining(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
from __future__ import annotations
from collections import deque
class _a :
"""simple docstring"""
def __init__( self : Tuple , a : list[str] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(a )
self.set_fail_transitions()
def A_ ( self : int , a : int , a : str ) ->int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A_ ( self : str , a : str ) ->None:
SCREAMING_SNAKE_CASE__ : int = 0
for character in keyword:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.find_next_state(a , a )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = next_state
self.adlist[current_state]["output"].append(a )
def A_ ( self : Any ) ->None:
SCREAMING_SNAKE_CASE__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE__ : Dict = 0
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE__ : Dict = self.adlist[r]["fail_state"]
while (
self.find_next_state(a , self.adlist[child]["value"] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__ : Dict = self.adlist[state]["fail_state"]
SCREAMING_SNAKE_CASE__ : int = self.find_next_state(
a , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : str = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def A_ ( self : int , a : str ) ->dict[str, list[int]]:
SCREAMING_SNAKE_CASE__ : dict = {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__ : str = 0
for i in range(len(a ) ):
while (
self.find_next_state(a , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__ : int = self.adlist[current_state]["fail_state"]
SCREAMING_SNAKE_CASE__ : Any = self.find_next_state(a , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__ : str = 0
else:
SCREAMING_SNAKE_CASE__ : List[str] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
result[key].append(i - len(a ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
__lowercase :Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
SCREAMING_SNAKE_CASE__ : Stack[int] = Stack()
SCREAMING_SNAKE_CASE__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowerCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowerCamelCase )
elif i == ")":
# RULE 4
SCREAMING_SNAKE_CASE__ : Union[str, Any] = operator_stack.peek()
operator_stack.pop()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE__ : str = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE__ : int = operators[opr](_lowerCamelCase , _lowerCamelCase )
operand_stack.push(_lowerCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__lowercase :str = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__lowercase :str = logging.get_logger(__name__)
__lowercase :int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowercase :str = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__lowercase :Optional[Any] = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = RobertaTokenizer
def __init__( self : Tuple , a : Any=None , a : str=None , a : Dict=None , a : int="replace" , a : Optional[Any]="<s>" , a : Any="</s>" , a : Union[str, Any]="</s>" , a : Dict="<s>" , a : Any="<unk>" , a : str="<pad>" , a : int="<mask>" , a : List[str]=False , a : Tuple=True , **a : int , ) ->int:
super().__init__(
a , a , tokenizer_file=a , errors=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , add_prefix_space=a , trim_offsets=a , **a , )
SCREAMING_SNAKE_CASE__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space:
SCREAMING_SNAKE_CASE__ : int = getattr(a , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = add_prefix_space
SCREAMING_SNAKE_CASE__ : List[str] = pre_tok_class(**a )
SCREAMING_SNAKE_CASE__ : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE__ : int = "post_processor"
SCREAMING_SNAKE_CASE__ : Any = getattr(self.backend_tokenizer , a , a )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE__ : Optional[int] = tuple(state["sep"] )
if "cls" in state:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tuple(state["cls"] )
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if state.get("add_prefix_space" , a ) != add_prefix_space:
SCREAMING_SNAKE_CASE__ : Optional[Any] = add_prefix_space
SCREAMING_SNAKE_CASE__ : Tuple = True
if state.get("trim_offsets" , a ) != trim_offsets:
SCREAMING_SNAKE_CASE__ : List[str] = trim_offsets
SCREAMING_SNAKE_CASE__ : str = True
if changes_to_apply:
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(a , state.pop("type" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = component_class(**a )
setattr(self.backend_tokenizer , a , a )
@property
def A_ ( self : List[str] ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self : Tuple , a : Optional[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else value
SCREAMING_SNAKE_CASE__ : List[Any] = value
def A_ ( self : Union[str, Any] , *a : Union[str, Any] , **a : List[Any] ) ->BatchEncoding:
SCREAMING_SNAKE_CASE__ : int = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a , **a )
def A_ ( self : List[str] , *a : Dict , **a : List[str] ) ->BatchEncoding:
SCREAMING_SNAKE_CASE__ : int = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a , **a )
def A_ ( self : Dict , a : str , a : Optional[str] = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE__ : List[str] = self._tokenizer.model.save(a , name=a )
return tuple(a )
def A_ ( self : Any , a : Union[str, Any] , a : str=None ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A_ ( self : Union[str, Any] , a : List[int] , a : Optional[List[int]] = None ) ->List[int]:
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
if start < end:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = randint(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = a[end]
SCREAMING_SNAKE_CASE__ : List[str] = a[pivot]
SCREAMING_SNAKE_CASE__ : Tuple = temp
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = randint(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = a[end]
SCREAMING_SNAKE_CASE__ : Optional[int] = a[pivot]
SCREAMING_SNAKE_CASE__ : List[Any] = temp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = start - 1
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE__ : Optional[int] = new_pivot_index + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = a[new_pivot_index]
SCREAMING_SNAKE_CASE__ : Tuple = a[index]
SCREAMING_SNAKE_CASE__ : Any = temp
SCREAMING_SNAKE_CASE__ : List[Any] = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE__ : Dict = a[end]
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
return new_pivot_index + 1, count
__lowercase :str = TemporaryFile()
__lowercase :List[Any] = 100 # 1000 elements are to be sorted
__lowercase , __lowercase :Dict = 0, 1 # mean and standard deviation
__lowercase :str = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
__lowercase :Tuple = np.load(outfile)
__lowercase :int = len(M) - 1
__lowercase :Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
__lowercase :Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__lowercase :str = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__lowercase :Tuple = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def A_ ( self : int ) ->str:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def A_ ( self : Union[str, Any] , a : List[str] , a : Dict , a : Union[str, Any]=None , a : List[Any]="uniform_average" , a : List[Any]=True ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = mean_squared_error(
a , a , sample_weight=a , multioutput=a , squared=a )
return {"mse": mse}
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
__lowercase :List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__lowercase :Optional[int] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__lowercase :Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : Dict ) ->Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def A_ ( self : str , a : List[Any] , a : Optional[Any] , a : Optional[int]=None ) ->List[Any]:
return {
"matthews_correlation": float(matthews_corrcoef(a , a , sample_weight=a ) ),
}
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
__lowercase :Any = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "xglm"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , a : int=25_60_08 , a : str=20_48 , a : Union[str, Any]=10_24 , a : str=40_96 , a : Optional[Any]=24 , a : Union[str, Any]=16 , a : Optional[Any]="gelu" , a : List[Any]=0.1 , a : Dict=0.1 , a : Any=0.0 , a : Optional[int]=0.0 , a : Union[str, Any]=0.02 , a : List[Any]=True , a : Optional[Any]=True , a : Union[str, Any]=2 , a : str=1 , a : Union[str, Any]=0 , a : List[Any]=2 , **a : Any , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = d_model
SCREAMING_SNAKE_CASE__ : Any = ffn_dim
SCREAMING_SNAKE_CASE__ : str = num_layers
SCREAMING_SNAKE_CASE__ : List[Any] = attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE__ : int = init_std
SCREAMING_SNAKE_CASE__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : str = use_cache
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
class _a :
"""simple docstring"""
def __init__( self : Dict , a : int ) ->int:
SCREAMING_SNAKE_CASE__ : list[list[Edge]] = [[] for _ in range(a )]
SCREAMING_SNAKE_CASE__ : List[Any] = size
def __getitem__( self : Union[str, Any] , a : int ) ->Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self._size
def A_ ( self : List[Any] , a : int , a : int , a : int ) ->List[str]:
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
self._graph[from_vertex].append(Edge(a , a ) )
def A_ ( self : List[str] , a : int , a : int ) ->int | None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = deque([start_vertex] )
SCREAMING_SNAKE_CASE__ : list[int | None] = [None] * self.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
while queue:
SCREAMING_SNAKE_CASE__ : List[Any] = queue.popleft()
SCREAMING_SNAKE_CASE__ : List[Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE__ : str = current_distance + edge.weight
SCREAMING_SNAKE_CASE__ : str = distances[edge.destination_vertex]
if (
isinstance(a , a )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE__ : List[str] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
SCREAMING_SNAKE_CASE__ : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
SCREAMING_SNAKE_CASE__ : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
SCREAMING_SNAKE_CASE__ : Any = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
SCREAMING_SNAKE_CASE__ : List[str] = tf.placeholder("float64" , [dim] )
SCREAMING_SNAKE_CASE__ : Any = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
SCREAMING_SNAKE_CASE__ : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
SCREAMING_SNAKE_CASE__ : str = tf.placeholder("int32" )
SCREAMING_SNAKE_CASE__ : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
SCREAMING_SNAKE_CASE__ : List[Any] = tf.reduce_mean(_lowerCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
SCREAMING_SNAKE_CASE__ : List[str] = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : str = tf.placeholder("float" , [dim] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase , _lowerCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [noofclusters] )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.argmin(_lowerCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
SCREAMING_SNAKE_CASE__ : Any = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
SCREAMING_SNAKE_CASE__ : Optional[int] = [
sess.run(_lowerCamelCase , feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
SCREAMING_SNAKE_CASE__ : Tuple = sess.run(
_lowerCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
SCREAMING_SNAKE_CASE__ : Tuple = sess.run(
_lowerCamelCase , feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
SCREAMING_SNAKE_CASE__ : Optional[Any] = sess.run(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :Optional[Any] = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
__lowercase :Optional[Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : list[int] , _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the reference grid
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Tuple = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the action grid
SCREAMING_SNAKE_CASE__ : List[str] = init[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = init[1]
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE__ : List[Any] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE__ : List[Any] = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE__ : Optional[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(_lowerCamelCase ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE__ : Dict = cell.pop()
SCREAMING_SNAKE_CASE__ : Any = next_cell[2]
SCREAMING_SNAKE_CASE__ : str = next_cell[3]
SCREAMING_SNAKE_CASE__ : str = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
else:
for i in range(len(_lowerCamelCase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE__ : List[Any] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE__ : List[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE__ : Any = g + cost
SCREAMING_SNAKE_CASE__ : List[str] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE__ : List[str] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = i
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : str = goal[0]
SCREAMING_SNAKE_CASE__ : int = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE__ : int = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE__ : Tuple = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE__ : List[Any] = xa
SCREAMING_SNAKE_CASE__ : Optional[int] = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(len(_lowerCamelCase ) ):
path.append(invpath[len(_lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__lowercase :int = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__lowercase :List[str] = [0, 0]
# all coordinates are given in format [y,x]
__lowercase :Union[str, Any] = [len(grid) - 1, len(grid[0]) - 1]
__lowercase :Optional[Any] = 1
# the cost map which pushes the path closer to the goal
__lowercase :List[str] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__lowercase :Optional[int] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__lowercase :str = 99
__lowercase , __lowercase :Optional[int] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[str] , a : Dict[str, int] , a : List[str] , a : int = None , a : int = None ) ->Dict:
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = max_length
SCREAMING_SNAKE_CASE__ : Tuple = vocab
SCREAMING_SNAKE_CASE__ : List[str] = merges
SCREAMING_SNAKE_CASE__ : Tuple = BytePairTokenizer(a , a , sequence_length=a )
@classmethod
def A_ ( cls : Union[str, Any] , a : GPTaTokenizer , *a : Tuple , **a : Optional[int] ) ->Any:
SCREAMING_SNAKE_CASE__ : int = [" ".join(a ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.get_vocab()
return cls(a , a , *a , **a )
@classmethod
def A_ ( cls : Union[str, Any] , a : Union[str, os.PathLike] , *a : int , **a : Dict ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(a , *a , **a )
return cls.from_tokenizer(a , *a , **a )
@classmethod
def A_ ( cls : List[str] , a : Optional[int] ) ->int:
return cls(**a )
def A_ ( self : List[str] ) ->Tuple:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A_ ( self : Tuple , a : Tuple , a : int = None ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.tf_tokenizer(a )
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones_like(a )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = pad_model_inputs(
a , max_seq_length=a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase :str = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "ibert"
def __init__( self : Optional[Any] , a : Optional[int]=3_05_22 , a : Tuple=7_68 , a : Tuple=12 , a : List[str]=12 , a : str=30_72 , a : str="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Tuple=5_12 , a : Dict=2 , a : str=0.02 , a : Union[str, Any]=1E-12 , a : Tuple=1 , a : List[Any]=0 , a : Optional[Any]=2 , a : List[Any]="absolute" , a : str=False , a : Any="none" , **a : Any , ) ->List[Any]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = quant_mode
SCREAMING_SNAKE_CASE__ : Union[str, Any] = force_dequant
class _a ( lowercase__ ):
"""simple docstring"""
@property
def A_ ( self : Tuple ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("only integers accepted as input" )
else:
SCREAMING_SNAKE_CASE__ : List[str] = str(abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
for index in range(len(_lowerCamelCase ) ):
num_transpositions[index].pop(_lowerCamelCase )
return max(
int("".join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
from __future__ import annotations
__lowercase :Optional[int] = list[list[int]]
# assigning initial values to the grid
__lowercase :Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__lowercase :Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def UpperCAmelCase ( _lowerCamelCase : Matrix , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def UpperCAmelCase ( _lowerCamelCase : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def UpperCAmelCase ( _lowerCamelCase : Matrix ):
'''simple docstring'''
if location := find_empty_location(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = digit
if sudoku(_lowerCamelCase ) is not None:
return grid
SCREAMING_SNAKE_CASE__ : str = 0
return None
def UpperCAmelCase ( _lowerCamelCase : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(_lowerCamelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__lowercase :Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self : int , a : str , a : str=13 , a : str=30 , a : Tuple=2 , a : List[Any]=3 , a : List[Any]=True , a : str=True , a : Optional[int]=32 , a : Optional[int]=5 , a : List[str]=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Optional[int]=0.1 , a : str=0.1 , a : Optional[int]=10 , a : Any=0.02 , a : Union[str, Any]=None , a : Any=2 , ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : List[str] = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : List[str] = patch_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Dict = is_training
SCREAMING_SNAKE_CASE__ : Dict = use_labels
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = scope
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : Dict = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Dict = num_patches + 1
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A_ ( self : List[Any] ) ->Tuple:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self : Tuple , a : Dict , a : Optional[int] , a : str ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = ViTModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Optional[int] , a : Optional[Any] , a : Optional[int] , a : Optional[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : str = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self : Optional[int] , a : Optional[int] , a : Optional[int] , a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Any = ViTForImageClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : int = ViTForImageClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : Any ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
),
) : Any = config_and_inputs
SCREAMING_SNAKE_CASE__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Tuple ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : str = ViTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def A_ ( self : Tuple ) ->List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def A_ ( self : Union[str, Any] ) ->List[Any]:
pass
def A_ ( self : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def A_ ( self : Dict ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def A_ ( self : Union[str, Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def A_ ( self : Tuple ) ->Any:
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def A_ ( self : Tuple ) ->List[Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : int = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self : Optional[Any] ) ->Tuple:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def A_ ( self : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE__ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE__ : str = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def A_ ( self : Tuple ) ->int:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_80 )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[str] = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(a , interpolate_pos_encoding=a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A_ ( self : int ) ->Any:
SCREAMING_SNAKE_CASE__ : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : str = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
class _a :
"""simple docstring"""
def __init__( self : Any ) ->None:
SCREAMING_SNAKE_CASE__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
SCREAMING_SNAKE_CASE__ : Tuple = False
def A_ ( self : int , a : list[str] ) ->None:
for word in words:
self.insert(a )
def A_ ( self : Dict , a : str ) ->None:
SCREAMING_SNAKE_CASE__ : Tuple = self
for char in word:
if char not in curr.nodes:
SCREAMING_SNAKE_CASE__ : str = TrieNode()
SCREAMING_SNAKE_CASE__ : int = curr.nodes[char]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def A_ ( self : str , a : str ) ->bool:
SCREAMING_SNAKE_CASE__ : Dict = self
for char in word:
if char not in curr.nodes:
return False
SCREAMING_SNAKE_CASE__ : Tuple = curr.nodes[char]
return curr.is_leaf
def A_ ( self : Optional[int] , a : str ) ->None:
def _delete(a : TrieNode , a : str , a : int ) -> bool:
if index == len(a ):
# If word does not exist
if not curr.is_leaf:
return False
SCREAMING_SNAKE_CASE__ : Tuple = False
return len(curr.nodes ) == 0
SCREAMING_SNAKE_CASE__ : Optional[int] = word[index]
SCREAMING_SNAKE_CASE__ : List[Any] = curr.nodes.get(a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
SCREAMING_SNAKE_CASE__ : Optional[int] = _delete(a , a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , a , 0 )
def UpperCAmelCase ( _lowerCamelCase : TrieNode , _lowerCamelCase : str ):
'''simple docstring'''
if node.is_leaf:
print(_lowerCamelCase , end=" " )
for key, value in node.nodes.items():
print_words(_lowerCamelCase , word + key )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "banana bananas bandana band apple all beast".split()
SCREAMING_SNAKE_CASE__ : Dict = TrieNode()
root.insert_many(_lowerCamelCase )
# print_words(root, "")
assert all(root.find(_lowerCamelCase ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : bool ):
'''simple docstring'''
print(str(_lowerCamelCase ) , "works!" if passes else "doesn't work :(" )
def UpperCAmelCase ( ):
'''simple docstring'''
assert test_trie()
def UpperCAmelCase ( ):
'''simple docstring'''
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
import math
import random
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : Any ) ->None:
SCREAMING_SNAKE_CASE__ : list[Any] = []
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : int = 0
def A_ ( self : List[str] ) ->bool:
return self.head == self.tail
def A_ ( self : Optional[int] , a : Any ) ->None:
self.data.append(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tail + 1
def A_ ( self : Union[str, Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : Tuple = self.data[self.head]
SCREAMING_SNAKE_CASE__ : int = self.head + 1
return ret
def A_ ( self : Tuple ) ->int:
return self.tail - self.head
def A_ ( self : Tuple ) ->None:
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class _a :
"""simple docstring"""
def __init__( self : List[Any] , a : Any ) ->None:
SCREAMING_SNAKE_CASE__ : List[Any] = data
SCREAMING_SNAKE_CASE__ : MyNode | None = None
SCREAMING_SNAKE_CASE__ : MyNode | None = None
SCREAMING_SNAKE_CASE__ : int = 1
def A_ ( self : int ) ->Any:
return self.data
def A_ ( self : Union[str, Any] ) ->MyNode | None:
return self.left
def A_ ( self : List[Any] ) ->MyNode | None:
return self.right
def A_ ( self : Dict ) ->int:
return self.height
def A_ ( self : Optional[Any] , a : Any ) ->None:
SCREAMING_SNAKE_CASE__ : List[str] = data
def A_ ( self : Any , a : MyNode | None ) ->None:
SCREAMING_SNAKE_CASE__ : List[str] = node
def A_ ( self : Any , a : MyNode | None ) ->None:
SCREAMING_SNAKE_CASE__ : List[Any] = node
def A_ ( self : Tuple , a : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = height
def UpperCAmelCase ( _lowerCamelCase : MyNode | None ):
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if a > b:
return a
return b
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
print("left rotation node:" , node.get_data() )
SCREAMING_SNAKE_CASE__ : str = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowerCamelCase )
return ret
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
print("right rotation node:" , node.get_data() )
SCREAMING_SNAKE_CASE__ : List[str] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowerCamelCase )
return ret
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_lowerCamelCase ) )
return right_rotation(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_lowerCamelCase ) )
return left_rotation(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : MyNode | None , _lowerCamelCase : Any ):
'''simple docstring'''
if node is None:
return MyNode(_lowerCamelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _lowerCamelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE__ : str = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE__ : List[Any] = right_rotation(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lr_rotation(_lowerCamelCase )
else:
node.set_right(insert_node(node.get_right() , _lowerCamelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE__ : Optional[int] = node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE__ : Any = rl_rotation(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = left_rotation(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCamelCase )
return node
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE__ : int = right_child
return root.get_data()
def UpperCAmelCase ( _lowerCamelCase : MyNode ):
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : List[Any] = root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE__ : str = left_child
return root.get_data()
def UpperCAmelCase ( _lowerCamelCase : MyNode , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = root.get_left()
SCREAMING_SNAKE_CASE__ : int = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE__ : Dict = get_left_most(_lowerCamelCase )
root.set_data(_lowerCamelCase )
root.set_right(del_node(_lowerCamelCase , _lowerCamelCase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE__ : List[str] = left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(_lowerCamelCase , _lowerCamelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_lowerCamelCase , _lowerCamelCase ) )
if get_height(_lowerCamelCase ) - get_height(_lowerCamelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE__ : str = left_rotation(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : List[str] = rl_rotation(_lowerCamelCase )
elif get_height(_lowerCamelCase ) - get_height(_lowerCamelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE__ : str = right_rotation(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = lr_rotation(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_lowerCamelCase )
return root
class _a :
"""simple docstring"""
def __init__( self : List[Any] ) ->None:
SCREAMING_SNAKE_CASE__ : MyNode | None = None
def A_ ( self : Optional[Any] ) ->int:
return get_height(self.root )
def A_ ( self : Optional[Any] , a : Any ) ->None:
print("insert:" + str(a ) )
SCREAMING_SNAKE_CASE__ : Dict = insert_node(self.root , a )
def A_ ( self : Any , a : Any ) ->None:
print("delete:" + str(a ) )
if self.root is None:
print("Tree is empty!" )
return
SCREAMING_SNAKE_CASE__ : str = del_node(self.root , a )
def __str__( self : str , ) ->str: # a level traversale, gives a more intuitive look on the tree
SCREAMING_SNAKE_CASE__ : List[Any] = ""
SCREAMING_SNAKE_CASE__ : List[str] = MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE__ : str = self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE__ : int = 0
while not q.is_empty():
SCREAMING_SNAKE_CASE__ : List[str] = q.pop()
SCREAMING_SNAKE_CASE__ : Optional[int] = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(a )
q.push(a )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE__ : Dict = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , a ) - 1:
SCREAMING_SNAKE_CASE__ : List[Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__lowercase :int = AVLtree()
__lowercase :str = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCAmelCase ( _lowerCamelCase : Optional[int]="" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp()
return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AgentAudio(a )
SCREAMING_SNAKE_CASE__ : Any = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(a , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(a ) )
# Ensure that the file contains the same value as the original tensor
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = sf.read(a )
self.assertTrue(torch.allclose(a , torch.tensor(a ) , atol=1E-4 ) )
def A_ ( self : List[str] ) ->str:
SCREAMING_SNAKE_CASE__ : str = torch.rand(12 , dtype=torch.floataa ) - 0.5
SCREAMING_SNAKE_CASE__ : int = get_new_path(suffix=".wav" )
sf.write(a , a , 1_60_00 )
SCREAMING_SNAKE_CASE__ : Tuple = AgentAudio(a )
self.assertTrue(torch.allclose(a , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , a )
@require_vision
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.randint(0 , 2_56 , (64, 64, 3) )
SCREAMING_SNAKE_CASE__ : List[str] = AgentImage(a )
SCREAMING_SNAKE_CASE__ : int = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(a , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(a ) )
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
SCREAMING_SNAKE_CASE__ : str = Image.open(a )
SCREAMING_SNAKE_CASE__ : List[str] = AgentImage(a )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(a ) )
def A_ ( self : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open(a )
SCREAMING_SNAKE_CASE__ : Any = AgentImage(a )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(a ) )
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : List[str] ) ->int:
SCREAMING_SNAKE_CASE__ : Any = "Hey!"
SCREAMING_SNAKE_CASE__ : List[str] = AgentText(a )
self.assertEqual(a , agent_type.to_string() )
self.assertEqual(a , agent_type.to_raw() )
self.assertEqual(a , a )
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : list ):
'''simple docstring'''
_enforce_args(_lowerCamelCase , _lowerCamelCase )
if n == 0:
return 0
SCREAMING_SNAKE_CASE__ : Dict = float("-inf" )
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(
_lowerCamelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , _lowerCamelCase ) )
return max_revue
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : list ):
'''simple docstring'''
_enforce_args(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : list , _lowerCamelCase : list ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
SCREAMING_SNAKE_CASE__ : str = float("-inf" )
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(
_lowerCamelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _lowerCamelCase , _lowerCamelCase ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = max_revenue
return max_rev[n]
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : list ):
'''simple docstring'''
_enforce_args(_lowerCamelCase , _lowerCamelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
SCREAMING_SNAKE_CASE__ : Any = [float("-inf" ) for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_rev[i]
for j in range(1 , i + 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , prices[j - 1] + max_rev[i - j] )
SCREAMING_SNAKE_CASE__ : str = max_revenue_i
return max_rev[n]
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : list ):
'''simple docstring'''
if n < 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(_lowerCamelCase )
if n > len(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = (
"Each integral piece of rod must have a corresponding price. "
f"""Got n = {n} but length of prices = {len(_lowerCamelCase )}"""
)
raise ValueError(_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = [6, 10, 12, 15, 20, 23]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(_lowerCamelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
SCREAMING_SNAKE_CASE__ : Optional[Any] = 36
SCREAMING_SNAKE_CASE__ : Tuple = top_down_cut_rod(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = bottom_up_cut_rod(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = naive_cut_rod_recursive(_lowerCamelCase , _lowerCamelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowercase :Tuple = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : List[str] , a : Path , a : Union[str, None] = None , a : Union[List[str], None] = None , a : Union[str, List[str], None] = None , a : bool = True , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [file for file in os.listdir(a ) if os.path.isfile(os.path.join(a , a ) )]
if identifier is not None:
SCREAMING_SNAKE_CASE__ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a ):
for n_ in n_identifier:
SCREAMING_SNAKE_CASE__ : Tuple = [file for file in files if n_ not in file]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [file for file in files if n_identifier not in file]
SCREAMING_SNAKE_CASE__ : int = ignore_files or []
ignore_files.append("__init__.py" )
SCREAMING_SNAKE_CASE__ : Dict = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a )
if only_modules:
SCREAMING_SNAKE_CASE__ : Optional[int] = file.split("." )[0]
try:
SCREAMING_SNAKE_CASE__ : str = getattr(a , a )
SCREAMING_SNAKE_CASE__ : Dict = doctest.DocTestSuite(a )
SCREAMING_SNAKE_CASE__ : Dict = unittest.TextTestRunner().run(a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
SCREAMING_SNAKE_CASE__ : List[str] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def A_ ( self : Union[str, Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = Path("src/transformers" )
SCREAMING_SNAKE_CASE__ : List[Any] = "modeling"
SCREAMING_SNAKE_CASE__ : Tuple = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a )
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = Path("src/transformers" )
SCREAMING_SNAKE_CASE__ : List[Any] = "tokenization"
self.analyze_directory(a , identifier=a )
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] = Path("src/transformers" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "configuration"
self.analyze_directory(a , identifier=a )
def A_ ( self : str ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path("src/transformers" )
SCREAMING_SNAKE_CASE__ : Optional[int] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a )
def A_ ( self : Optional[int] ) ->Any:
SCREAMING_SNAKE_CASE__ : Any = Path("docs/source" )
SCREAMING_SNAKE_CASE__ : List[str] = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a )
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
def decorator(_lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE__ : Tuple = getattr(_lowerCamelCase , "handle_key" , [] )
handle += [key]
setattr(_lowerCamelCase , "handle_key" , _lowerCamelCase )
return func
return decorator
def UpperCAmelCase ( *_lowerCamelCase : List[str] ):
'''simple docstring'''
def decorator(_lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(_lowerCamelCase , "handle_key" , [] )
handle += keys
setattr(_lowerCamelCase , "handle_key" , _lowerCamelCase )
return func
return decorator
class _a ( lowercase__ ):
"""simple docstring"""
def __new__( cls : Any , a : Any , a : Any , a : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[str] = super().__new__(cls , a , a , a )
if not hasattr(a , "key_handler" ):
setattr(a , "key_handler" , {} )
setattr(a , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(a , "handle_key" , [] )
for key in handled_keys:
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return new_cls
@staticmethod
def A_ ( cls : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_character()
if char != KEYMAP["undefined"]:
SCREAMING_SNAKE_CASE__ : Dict = ord(a )
SCREAMING_SNAKE_CASE__ : Tuple = cls.key_handler.get(a )
if handler:
SCREAMING_SNAKE_CASE__ : int = char
return handler(cls )
else:
return None
def UpperCAmelCase ( cls : Union[str, Any] ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
"""simple docstring"""
def __init__( self : List[Any] , a : List[str] , a : Optional[Any]=2 , a : Optional[int]=True , a : Dict=False , a : List[Any]=10 , a : str=3 , a : Any=32 * 4 , a : Dict=32 * 6 , a : Tuple=4 , a : List[Any]=32 , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : Dict = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ : List[Any] = num_queries
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Dict = min_size
SCREAMING_SNAKE_CASE__ : Tuple = max_size
SCREAMING_SNAKE_CASE__ : Any = num_labels
SCREAMING_SNAKE_CASE__ : Dict = mask_feature_size
def A_ ( self : Tuple ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a )
SCREAMING_SNAKE_CASE__ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=a ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A_ ( self : Union[str, Any] ) ->List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def A_ ( self : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : int = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def A_ ( self : List[Any] , a : List[str] , a : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a ) , config.decoder_config.decoder_layers )
def A_ ( self : str , a : Optional[Any] , a : Optional[int] , a : Any , a : int=False ) ->str:
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = MaskFormerModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(pixel_values=a , pixel_mask=a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a , output_hidden_states=a )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a , a )
def A_ ( self : Dict , a : Tuple , a : str , a : Tuple , a : List[str] , a : Tuple ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = MaskFormerForInstanceSegmentation(config=a )
model.to(a )
model.eval()
def comm_check_on_output(a : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(pixel_values=a , pixel_mask=a )
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
comm_check_on_output(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
pixel_values=a , pixel_mask=a , mask_labels=a , class_labels=a )
comm_check_on_output(a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Any ) ->int:
SCREAMING_SNAKE_CASE__ : int = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a , has_text_modality=a )
def A_ ( self : Any ) ->Tuple:
self.config_tester.run_common_tests()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def A_ ( self : List[str] ) ->Optional[Any]:
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def A_ ( self : Any ) ->Any:
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def A_ ( self : Optional[Any] ) ->Dict:
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def A_ ( self : Dict ) ->int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A_ ( self : Union[str, Any] ) ->Tuple:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : int ) ->Dict:
pass
def A_ ( self : Tuple ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
@slow
def A_ ( self : Union[str, Any] ) ->List[str]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE__ : str = MaskFormerModel.from_pretrained(a )
self.assertIsNotNone(a )
def A_ ( self : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=a ),
"mask_labels": torch.randn((2, 10, *size) , device=a ),
"class_labels": torch.zeros(2 , 10 , device=a ).long(),
}
SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a )
SCREAMING_SNAKE_CASE__ : Dict = model(**a )
self.assertTrue(outputs.loss is not None )
def A_ ( self : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a ).to(a )
SCREAMING_SNAKE_CASE__ : str = model(**a , output_attentions=a )
self.assertTrue(outputs.attentions is not None )
def A_ ( self : Any ) ->List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ : Dict = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE__ : Tuple = model(a , mask_labels=a , class_labels=a ).loss
loss.backward()
def A_ ( self : Tuple ) ->Optional[Any]:
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ : List[str] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Any = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE__ : Any = model(a , mask_labels=a , class_labels=a )
SCREAMING_SNAKE_CASE__ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
SCREAMING_SNAKE_CASE__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowercase :int = 1e-4
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self : Tuple ) ->Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def A_ ( self : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(a , return_tensors="pt" ).to(a )
SCREAMING_SNAKE_CASE__ : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a , atol=a ) )
def A_ ( self : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(a )
.eval()
)
SCREAMING_SNAKE_CASE__ : int = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a , return_tensors="pt" ).to(a )
SCREAMING_SNAKE_CASE__ : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**a )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : int = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(a ).to(a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )
def A_ ( self : Tuple ) ->int:
SCREAMING_SNAKE_CASE__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(a )
.eval()
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(a , return_tensors="pt" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**a )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(a ).to(a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )
def A_ ( self : str ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(a )
.eval()
)
SCREAMING_SNAKE_CASE__ : int = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : List[str] = inputs["pixel_values"].to(a )
SCREAMING_SNAKE_CASE__ : int = [el.to(a ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE__ : Optional[int] = [el.to(a ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**a )
self.assertTrue(outputs.loss is not None )
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
from math import factorial
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_lowerCamelCase ) // (factorial(_lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
"If a class of 40 students must be arranged into groups of",
f"4 for group projects, there are {combinations(40, 4)} ways",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"are {combinations(10, 3)} ways that first, second and",
"third place can be awarded.",
)
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Union[str, Any] = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Tuple = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = emb.weight.shape
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = emb.weight.data
return lin_layer
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
for old_key in state_dict.keys():
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
SCREAMING_SNAKE_CASE__ : Dict = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
SCREAMING_SNAKE_CASE__ : int = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("final_layer_norm" , "ff_layer_norm" )
SCREAMING_SNAKE_CASE__ : Dict = state_dict[old_key]
return new_dict
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str = WEIGHTS_NAME ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : str = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.load(_lowerCamelCase )["model"]
remove_ignore_keys_(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
_lowerCamelCase , weights_name.replace(".bin" , f"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
SCREAMING_SNAKE_CASE__ : int = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , f"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
for idx, shard in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin""" )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
SCREAMING_SNAKE_CASE__ : Dict = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE__ : List[str] = {"total_size": total_size}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + "\n"
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__lowercase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__lowercase :Any = parser.parse_args()
__lowercase , __lowercase :Optional[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__lowercase :Union[str, Any] = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__lowercase :Union[str, Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict , a : int , a : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.ones((batch_size, length) ) / length
return scores
def A_ ( self : Any ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : int = 20
SCREAMING_SNAKE_CASE__ : int = self._get_uniform_logits(batch_size=2 , length=a )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE__ : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE__ : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE__ : int = jax.nn.softmax(a , axis=-1 )
SCREAMING_SNAKE_CASE__ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE__ : Dict = jax.nn.softmax(temp_dist_warper_sharper(a , scores.copy() , cur_len=a ) , axis=-1 )
SCREAMING_SNAKE_CASE__ : Dict = jax.nn.softmax(temp_dist_warper_smoother(a , scores.copy() , cur_len=a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Dict = 10
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
# create ramp distribution
SCREAMING_SNAKE_CASE__ : str = np.broadcast_to(np.arange(a )[None, :] , (batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE__ : List[str] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE__ : Any = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ : Any = top_k_warp(a , a , cur_len=a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE__ : Dict = 5
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE__ : List[str] = np.broadcast_to(np.arange(a )[None, :] , (batch_size, length) ).copy()
SCREAMING_SNAKE_CASE__ : Dict = top_k_warp_safety_check(a , a , cur_len=a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def A_ ( self : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
SCREAMING_SNAKE_CASE__ : Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE__ : Dict = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE__ : Dict = np.exp(top_p_warp(a , a , cur_len=a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE__ : str = np.broadcast_to(np.arange(a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE__ : int = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = top_p_warp(a , a , cur_len=a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def A_ ( self : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = 20
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ : int = 5
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : List[str] = min_dist_processor(a , a , cur_len=a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE__ : Tuple = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : List[str] = 15
SCREAMING_SNAKE_CASE__ : Dict = min_dist_processor(a , a , cur_len=a )
self.assertFalse(jnp.isinf(a ).any() )
def A_ ( self : Dict ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = 20
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Dict = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = logits_processor(a , a , cur_len=a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE__ : Any = 3
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = logits_processor(a , a , cur_len=a )
self.assertFalse(jnp.isinf(a ).any() )
def A_ ( self : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 20
SCREAMING_SNAKE_CASE__ : List[str] = 4
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 5
SCREAMING_SNAKE_CASE__ : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=a , eos_token_id=a )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : List[Any] = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : str = logits_processor(a , a , cur_len=a )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : int = logits_processor(a , a , cur_len=a )
self.assertFalse(jnp.isinf(a ).any() )
def A_ ( self : Optional[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Dict = 4
SCREAMING_SNAKE_CASE__ : List[str] = 10
SCREAMING_SNAKE_CASE__ : Any = 15
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((batch_size, sequence_length) , a )
SCREAMING_SNAKE_CASE__ : List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE__ : List[Any] = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : Dict = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ : str = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a )
SCREAMING_SNAKE_CASE__ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a )
SCREAMING_SNAKE_CASE__ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=a , eos_token_id=a )
SCREAMING_SNAKE_CASE__ : List[str] = 10
# no processor list
SCREAMING_SNAKE_CASE__ : Any = temp_dist_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : str = top_k_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : int = top_p_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : Dict = min_dist_proc(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bos_dist_proc(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : int = eos_dist_proc(a , a , cur_len=a )
# with processor list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE__ : List[Any] = processor(a , a , cur_len=a )
# scores should be equal
self.assertTrue(jnp.allclose(a , a , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def A_ ( self : List[str] ) ->str:
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Dict = 10
SCREAMING_SNAKE_CASE__ : Optional[int] = 15
SCREAMING_SNAKE_CASE__ : Tuple = 2
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((batch_size, sequence_length) , a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.copy()
SCREAMING_SNAKE_CASE__ : Any = self._get_uniform_logits(a , a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE__ : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a )
SCREAMING_SNAKE_CASE__ : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=a , eos_token_id=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
# no processor list
def run_no_processor_list(a : Union[str, Any] , a : Optional[int] , a : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = temp_dist_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : str = top_k_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : List[str] = top_p_warp(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : int = min_dist_proc(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : List[Any] = bos_dist_proc(a , a , cur_len=a )
SCREAMING_SNAKE_CASE__ : str = eos_dist_proc(a , a , cur_len=a )
return scores
# with processor list
def run_processor_list(a : int , a : Union[str, Any] , a : Dict ):
SCREAMING_SNAKE_CASE__ : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE__ : List[Any] = processor(a , a , cur_len=a )
return scores
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.jit(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.jit(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jitted_run_no_processor_list(a , a , a )
SCREAMING_SNAKE_CASE__ : Dict = jitted_run_processor_list(a , a , a )
# scores should be equal
self.assertTrue(jnp.allclose(a , a , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Any = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Tuple = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__lowercase :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Optional[Any] = logging.get_logger(__name__)
__lowercase :List[str] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "trocr"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : int , a : int=5_02_65 , a : int=10_24 , a : Tuple=12 , a : int=16 , a : Optional[int]=40_96 , a : Dict="gelu" , a : str=5_12 , a : Optional[Any]=0.1 , a : int=0.0 , a : Optional[int]=0.0 , a : List[Any]=2 , a : str=0.02 , a : Dict=0.0 , a : List[Any]=True , a : int=False , a : List[str]=True , a : str=True , a : List[Any]=1 , a : Dict=0 , a : Any=2 , **a : int , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = d_model
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_layers
SCREAMING_SNAKE_CASE__ : Tuple = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = dropout
SCREAMING_SNAKE_CASE__ : List[str] = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Dict = init_std
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Dict = use_cache
SCREAMING_SNAKE_CASE__ : Any = scale_embedding
SCREAMING_SNAKE_CASE__ : int = use_learned_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layernorm_embedding
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
import functools
from typing import Any
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : list[str] ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not all(
isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
SCREAMING_SNAKE_CASE__ : dict[str, Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "WORD_KEEPER"
for word in words:
SCREAMING_SNAKE_CASE__ : Optional[Any] = trie
for c in word:
if c not in trie_node:
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : str = trie_node[c]
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(_lowerCamelCase )
# Dynamic programming method
@functools.cache
def is_breakable(_lowerCamelCase : int ) -> bool:
if index == len_string:
return True
SCREAMING_SNAKE_CASE__ : Optional[int] = trie
for i in range(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : str = trie_node.get(string[i] , _lowerCamelCase )
if trie_node is None:
return False
if trie_node.get(_lowerCamelCase , _lowerCamelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] ):
'''simple docstring'''
if gpta_config_file == "":
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaConfig()
else:
SCREAMING_SNAKE_CASE__ : Dict = GPTaConfig.from_json_file(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaModel(_lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ : List[str] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ : int = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__lowercase :Optional[int] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
__lowercase :Tuple = 100
__lowercase :Optional[int] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__lowercase :int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
SCREAMING_SNAKE_CASE__ : set[int] = set()
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def UpperCAmelCase ( _lowerCamelCase : int = 5_000 ):
'''simple docstring'''
for number_to_partition in range(1 , _lowerCamelCase ):
if len(partition(_lowerCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"{solution() = }")
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : List[Any] , a : str=None , a : List[str]=None , a : Optional[int]=None , a : Union[str, Any]="resnet50" , a : Union[str, Any]=3 , a : Any=32 , a : str=3 , a : Dict=True , a : Dict=True , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : List[Any] = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE__ : List[Any] = stage_names
SCREAMING_SNAKE_CASE__ : Any = out_features
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
def A_ ( self : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, pixel_values
def A_ ( self : Tuple ) ->str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def A_ ( self : Dict , a : Optional[Any] , a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : int = TimmBackbone(config=a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def A_ ( self : List[str] ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _a ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TimmBackbone,) if is_torch_available() else ()
snake_case_ = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : int = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a )
def A_ ( self : Any ) ->Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = "resnet18"
SCREAMING_SNAKE_CASE__ : Optional[int] = "microsoft/resnet-18"
SCREAMING_SNAKE_CASE__ : Tuple = AutoBackbone.from_pretrained(a , use_timm_backbone=a )
SCREAMING_SNAKE_CASE__ : Dict = AutoBackbone.from_pretrained(a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE__ : str = AutoBackbone.from_pretrained(a , use_timm_backbone=a , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE__ : Any = AutoBackbone.from_pretrained(a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def A_ ( self : Dict ) ->Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def A_ ( self : Optional[Any] ) ->List[Any]:
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def A_ ( self : Tuple ) ->Optional[Any]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def A_ ( self : Dict ) ->Optional[int]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def A_ ( self : Dict ) ->str:
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def A_ ( self : List[str] ) ->Any:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def A_ ( self : Optional[int] ) ->int:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def A_ ( self : List[str] ) ->List[str]:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def A_ ( self : int ) ->List[Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def A_ ( self : List[Any] ) ->Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def A_ ( self : Optional[Any] ) ->Tuple:
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def A_ ( self : int ) ->Tuple:
pass
@unittest.skip("Safetensors is not supported by timm." )
def A_ ( self : int ) ->List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : int ) ->Tuple:
pass
def A_ ( self : Dict ) ->Any:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(a )
SCREAMING_SNAKE_CASE__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Optional[int] = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE__ : str = self.all_model_classes[0]
SCREAMING_SNAKE_CASE__ : str = model_class(a )
model.to(a )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE__ : int = model(**a )
SCREAMING_SNAKE_CASE__ : int = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE__ : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A_ ( self : Optional[int] ) ->List[str]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(**a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE__ : Optional[int] = copy.deepcopy(a )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Any = model_class(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a )
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
from math import factorial
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float ):
'''simple docstring'''
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
SCREAMING_SNAKE_CASE__ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
SCREAMING_SNAKE_CASE__ : Any = float(factorial(_lowerCamelCase ) )
coefficient /= factorial(_lowerCamelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | str] ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def UpperCAmelCase ( _lowerCamelCase : list[int | str] , _lowerCamelCase : list[int | str] , _lowerCamelCase : int , _lowerCamelCase : list[int] , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
__lowercase :list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase :list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Any = logging.get_logger(__name__)
__lowercase :Union[str, Any] = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "decision_transformer"
snake_case_ = ["past_key_values"]
snake_case_ = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : str , a : Optional[int]=17 , a : Any=4 , a : int=1_28 , a : Tuple=40_96 , a : List[Any]=True , a : int=1 , a : Any=10_24 , a : Optional[Any]=3 , a : List[str]=1 , a : str=None , a : Dict="relu" , a : int=0.1 , a : Tuple=0.1 , a : Optional[int]=0.1 , a : Tuple=1E-5 , a : str=0.02 , a : Optional[int]=True , a : Optional[int]=True , a : Tuple=5_02_56 , a : List[Any]=5_02_56 , a : int=False , a : Optional[Any]=False , **a : List[Any] , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dim
SCREAMING_SNAKE_CASE__ : Tuple = act_dim
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = max_ep_len
SCREAMING_SNAKE_CASE__ : List[str] = action_tanh
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : str = n_positions
SCREAMING_SNAKE_CASE__ : int = n_layer
SCREAMING_SNAKE_CASE__ : List[str] = n_head
SCREAMING_SNAKE_CASE__ : Dict = n_inner
SCREAMING_SNAKE_CASE__ : Tuple = activation_function
SCREAMING_SNAKE_CASE__ : List[Any] = resid_pdrop
SCREAMING_SNAKE_CASE__ : Tuple = embd_pdrop
SCREAMING_SNAKE_CASE__ : Dict = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scale_attn_weights
SCREAMING_SNAKE_CASE__ : List[str] = use_cache
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE__ : List[Any] = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE__ : int = bos_token_id
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a )
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
import numpy as np
import qiskit
def UpperCAmelCase ( _lowerCamelCase : int = 8 , _lowerCamelCase : int | None = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
SCREAMING_SNAKE_CASE__ : Optional[Any] = 6 * key_len
# Measurement basis for Alice's qubits.
SCREAMING_SNAKE_CASE__ : List[str] = rng.integers(2 , size=_lowerCamelCase )
# The set of states Alice will prepare.
SCREAMING_SNAKE_CASE__ : str = rng.integers(2 , size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
SCREAMING_SNAKE_CASE__ : List[str] = rng.integers(2 , size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
SCREAMING_SNAKE_CASE__ : Tuple = qiskit.QuantumCircuit(_lowerCamelCase , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
SCREAMING_SNAKE_CASE__ : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
SCREAMING_SNAKE_CASE__ : Tuple = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1 , seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
SCREAMING_SNAKE_CASE__ : Optional[int] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
SCREAMING_SNAKE_CASE__ : Any = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
SCREAMING_SNAKE_CASE__ : Optional[int] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase , "0" )
return key
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[list[int]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(_lowerCamelCase )
# We need to create solution object to save path.
SCREAMING_SNAKE_CASE__ : Optional[int] = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
SCREAMING_SNAKE_CASE__ : Tuple = run_maze(_lowerCamelCase , 0 , 0 , _lowerCamelCase )
if solved:
print("\n".join(str(_lowerCamelCase ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase )
# Final check point.
if i == j == (size - 1):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
return True
SCREAMING_SNAKE_CASE__ : Any = (not i < 0) and (not j < 0) # Check lower bounds
SCREAMING_SNAKE_CASE__ : Dict = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
SCREAMING_SNAKE_CASE__ : Tuple = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
# check for directions
if (
run_maze(_lowerCamelCase , i + 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j + 1 , _lowerCamelCase )
or run_maze(_lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j - 1 , _lowerCamelCase )
):
return True
SCREAMING_SNAKE_CASE__ : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
SCREAMING_SNAKE_CASE__ : Any = precision
SCREAMING_SNAKE_CASE__ : List[str] = ceil(precision / 14 )
SCREAMING_SNAKE_CASE__ : Optional[int] = 426_880 * Decimal(10_005 ).sqrt()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 13_591_409
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Decimal(_lowerCamelCase )
for k in range(1 , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowercase :Optional[Any] = 50
print(f"The first {n} digits of pi is: {pi(n)}")
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCAmelCase ( _lowerCamelCase : Dict="ro" , _lowerCamelCase : str="en" , _lowerCamelCase : List[Any]="wmt16" , _lowerCamelCase : str=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
SCREAMING_SNAKE_CASE__ : Any = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
SCREAMING_SNAKE_CASE__ : str = datasets.load_dataset(_lowerCamelCase , _lowerCamelCase )
if save_dir is None:
SCREAMING_SNAKE_CASE__ : str = f"""{dataset}-{pair}"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(_lowerCamelCase )
save_dir.mkdir(exist_ok=_lowerCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "val" if split == "validation" else split
SCREAMING_SNAKE_CASE__ : Optional[int] = save_dir.joinpath(f"""{fn}.source""" )
SCREAMING_SNAKE_CASE__ : Tuple = save_dir.joinpath(f"""{fn}.target""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = src_path.open("w+" )
SCREAMING_SNAKE_CASE__ : str = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowercase :Dict = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : str ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = _TestCommandArgs(dataset=_lowerCamelCase , all_configs=_lowerCamelCase , save_infos=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = TestCommand(*_lowerCamelCase )
test_command.run()
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_lowerCamelCase , "README.md" )
assert os.path.exists(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = DatasetInfosDict.from_directory(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_351_563,
"num_examples": 10_000,
},
{
"name": "validation",
"num_bytes": 238_418,
"num_examples": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = getattr(dataset_infos["default"] , _lowerCamelCase ), getattr(expected_dataset_infos["default"] , _lowerCamelCase )
if key == "num_bytes":
assert is_apercent_close(_lowerCamelCase , _lowerCamelCase )
elif key == "splits":
assert list(_lowerCamelCase ) == list(_lowerCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE__ : Tuple = tf_top_k_top_p_filtering(a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE__ : Tuple = output[output != -float("inf" )]
SCREAMING_SNAKE_CASE__ : Dict = tf.cast(
tf.where(tf.not_equal(a , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(a , a , rtol=1E-12 )
tf.debugging.assert_equal(a , a )
@require_tf
class _a ( unittest.TestCase , lowercase__ ):
"""simple docstring"""
if is_tf_available():
snake_case_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def A_ ( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : List[Any] = 2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self : List[str] , a : Any ) ->List[str]:
super(a , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def A_ ( self : Tuple , a : Optional[Any] , a : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[2, 0], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : List[str] = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.saved_model.load(a ).signatures["serving_default"]
for batch_size in range(1 , len(a ) + 1 ):
SCREAMING_SNAKE_CASE__ : int = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE__ : List[str] = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
def A_ ( self : List[str] ) ->Any:
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : Any = 2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self : List[str] , a : Union[str, Any] ) ->str:
super(a , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def A_ ( self : str , a : Tuple , a : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Tuple = [[2], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : str = [[1], [1, 1]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.saved_model.load(a ).signatures["serving_default"]
for input_row in range(len(a ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
@require_tensorflow_text
def A_ ( self : Tuple ) ->List[str]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a )
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a , "spiece.model" ) , "rb" ).read() )
SCREAMING_SNAKE_CASE__ : str = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def A_ ( self : Any , a : Union[str, Any] , *a : Optional[int] , **a : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.tokenize(a )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = text.pad_model_inputs(
a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.generate(input_ids=a , attention_mask=a )
return self.tokenizer.detokenize(a )
SCREAMING_SNAKE_CASE__ : Tuple = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
SCREAMING_SNAKE_CASE__ : List[Any] = complete_model(a )
SCREAMING_SNAKE_CASE__ : List[str] = tf.keras.Model(a , a )
keras_model.save(a )
def A_ ( self : Dict ) ->Optional[int]:
# Has PT equivalent: this test relies on random sampling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
SCREAMING_SNAKE_CASE__ : Dict = 14
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : int = "Hello, my dog is cute and"
SCREAMING_SNAKE_CASE__ : int = tokenizer(a , return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE__ : str = [6_38, 1_98]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def A_ ( self : Optional[int] ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : str = "Hugging Face is a technology company based in New York and Paris."
SCREAMING_SNAKE_CASE__ : str = bart_tokenizer(a , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : Optional[int] = bart_model.generate(a ).numpy()
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Optional[Any] , a : List[str] , a : Dict=None , **a : str ) ->Optional[int]:
return super().call(a , **a )
SCREAMING_SNAKE_CASE__ : Dict = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bart_model.generate(a , foo="bar" ).numpy()
self.assertTrue(np.array_equal(a , a ) )
class _a ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def A_ ( self : Any , a : Tuple , **a : Union[str, Any] ) ->Any:
return super().call(a , **a )
SCREAMING_SNAKE_CASE__ : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE__ : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE__ : Tuple = bart_model.generate(a ).numpy()
with self.assertRaises(a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a , foo="bar" )
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : Dict , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = True , **a : Tuple , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : str = size if size is not None else {"shortest_edge": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : int = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : int = get_size_dict(a , default_to_square=a , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : Optional[Any] = size
SCREAMING_SNAKE_CASE__ : Optional[Any] = resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Any = crop_size
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE__ : Tuple = do_convert_rgb
def A_ ( self : Optional[int] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : str = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : int = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : str , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ) ->Dict:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Dict , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : int , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : int = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = None , a : Optional[Union[str, TensorType]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , **a : Union[str, Any] , ) ->PIL.Image.Image:
SCREAMING_SNAKE_CASE__ : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , param_name="size" , default_to_square=a )
SCREAMING_SNAKE_CASE__ : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , param_name="crop_size" , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE__ : Tuple = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : Dict = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Tuple = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__lowercase :Optional[int] = int(input("Enter number: ").strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowercase :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : List[Any] , a : Optional[int] ) ->Any:
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : str , a : int = 1 , a : int = 1_00 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[float] = None , a : bool = True , ) ->Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
SCREAMING_SNAKE_CASE__ : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
SCREAMING_SNAKE_CASE__ : Tuple = audio_length_in_s * self.unet.config.sample_rate
SCREAMING_SNAKE_CASE__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
SCREAMING_SNAKE_CASE__ : str = int(a )
if sample_size % down_scale_factor != 0:
SCREAMING_SNAKE_CASE__ : List[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
" process." )
SCREAMING_SNAKE_CASE__ : Tuple = int(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
SCREAMING_SNAKE_CASE__ : str = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
SCREAMING_SNAKE_CASE__ : Any = randn_tensor(a , generator=a , device=self.device , dtype=a )
# set step values
self.scheduler.set_timesteps(a , device=audio.device )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.timesteps.to(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ : Any = self.unet(a , a ).sample
# 2. compute previous image: x_t -> t_t-1
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.step(a , a , a ).prev_sample
SCREAMING_SNAKE_CASE__ : List[str] = audio.clamp(-1 , 1 ).float().cpu().numpy()
SCREAMING_SNAKE_CASE__ : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=a )
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def A_ ( *a : Dict , **a : Dict ) ->Any:
pass
@is_pipeline_test
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def A_ ( self : Tuple , a : Optional[int] , a : List[Any] , a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : int = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
SCREAMING_SNAKE_CASE__ : str = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def A_ ( self : Dict , a : Union[str, Any] , a : Optional[int] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = vqa_pipeline(a , top_k=1 )
self.assertEqual(
a , [
[{"score": ANY(a ), "answer": ANY(a )}],
[{"score": ANY(a ), "answer": ANY(a )}],
] , )
@require_torch
def A_ ( self : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
SCREAMING_SNAKE_CASE__ : Dict = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE__ : List[Any] = "How many cats are there?"
SCREAMING_SNAKE_CASE__ : Optional[int] = vqa_pipeline(image=a , question="How many cats are there?" , top_k=2 )
self.assertEqual(
a , [{"score": ANY(a ), "answer": ANY(a )}, {"score": ANY(a ), "answer": ANY(a )}] )
SCREAMING_SNAKE_CASE__ : str = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
a , [{"score": ANY(a ), "answer": ANY(a )}, {"score": ANY(a ), "answer": ANY(a )}] )
@slow
@require_torch
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
SCREAMING_SNAKE_CASE__ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE__ : List[str] = "How many cats are there?"
SCREAMING_SNAKE_CASE__ : int = vqa_pipeline(image=a , question=a , top_k=2 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
SCREAMING_SNAKE_CASE__ : str = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
SCREAMING_SNAKE_CASE__ : List[Any] = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def A_ ( self : Tuple ) ->Optional[Any]:
pass
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = emb.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = emb.weight.data
return lin_layer
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : int="facebook/mbart-large-en-ro" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
remove_ignore_keys_(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict["encoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE__ : int = MBartConfig.from_pretrained(_lowerCamelCase , vocab_size=_lowerCamelCase )
if mbart_aa and finetuned:
SCREAMING_SNAKE_CASE__ : Optional[int] = "relu"
SCREAMING_SNAKE_CASE__ : Any = state_dict["decoder.embed_tokens.weight"]
SCREAMING_SNAKE_CASE__ : List[Any] = MBartForConditionalGeneration(_lowerCamelCase )
model.model.load_state_dict(_lowerCamelCase )
if finetuned:
SCREAMING_SNAKE_CASE__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowercase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
__lowercase :Dict = parser.parse_args()
__lowercase :Union[str, Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
__lowercase :Optional[int] = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
__lowercase :Dict = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
__lowercase :Tuple = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
__lowercase :Tuple = f"down_blocks.{i}.resnets.{j}."
__lowercase :List[Any] = f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
__lowercase :Any = f"down_blocks.{i}.attentions.{j}."
__lowercase :Union[str, Any] = f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
__lowercase :Optional[int] = f"up_blocks.{i}.resnets.{j}."
__lowercase :List[Any] = f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
__lowercase :Tuple = f"up_blocks.{i}.attentions.{j}."
__lowercase :int = f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
__lowercase :str = f"down_blocks.{i}.downsamplers.0.conv."
__lowercase :List[Any] = f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
__lowercase :Optional[Any] = f"up_blocks.{i}.upsamplers.0."
__lowercase :List[Any] = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
__lowercase :List[Any] = "mid_block.attentions.0."
__lowercase :Any = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
__lowercase :Tuple = f"mid_block.resnets.{j}."
__lowercase :int = f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
SCREAMING_SNAKE_CASE__ : Dict = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
SCREAMING_SNAKE_CASE__ : int = v.replace(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
SCREAMING_SNAKE_CASE__ : Any = v.replace(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = v
SCREAMING_SNAKE_CASE__ : int = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__lowercase :str = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__lowercase :Union[str, Any] = f"encoder.down_blocks.{i}.resnets.{j}."
__lowercase :Optional[Any] = f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__lowercase :List[Any] = f"down_blocks.{i}.downsamplers.0."
__lowercase :Tuple = f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__lowercase :Any = f"up_blocks.{i}.upsamplers.0."
__lowercase :Any = f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__lowercase :Dict = f"decoder.up_blocks.{i}.resnets.{j}."
__lowercase :Union[str, Any] = f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__lowercase :Optional[Any] = f"mid_block.resnets.{i}."
__lowercase :Tuple = f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__lowercase :int = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def UpperCAmelCase ( _lowerCamelCase : Optional[int] ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def UpperCAmelCase ( _lowerCamelCase : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
SCREAMING_SNAKE_CASE__ : int = v.replace(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
SCREAMING_SNAKE_CASE__ : Tuple = v.replace(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = v
SCREAMING_SNAKE_CASE__ : Optional[int] = {v: vae_state_dict[k] for k, v in mapping.items()}
SCREAMING_SNAKE_CASE__ : str = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
SCREAMING_SNAKE_CASE__ : List[Any] = reshape_weight_for_sd(_lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
__lowercase :List[Any] = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
__lowercase :Dict = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
__lowercase :Tuple = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__lowercase :Any = {"q": 0, "k": 1, "v": 2}
def UpperCAmelCase ( _lowerCamelCase : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {}
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : int = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
SCREAMING_SNAKE_CASE__ : List[Any] = k[: -len(".q_proj.weight" )]
SCREAMING_SNAKE_CASE__ : str = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
SCREAMING_SNAKE_CASE__ : Optional[int] = [None, None, None]
SCREAMING_SNAKE_CASE__ : List[Any] = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
SCREAMING_SNAKE_CASE__ : int = k[: -len(".q_proj.bias" )]
SCREAMING_SNAKE_CASE__ : Optional[int] = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
SCREAMING_SNAKE_CASE__ : Tuple = [None, None, None]
SCREAMING_SNAKE_CASE__ : Optional[Any] = v
continue
SCREAMING_SNAKE_CASE__ : Dict = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(m.group(0 ) )] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
SCREAMING_SNAKE_CASE__ : str = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(m.group(0 ) )] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(_lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
SCREAMING_SNAKE_CASE__ : str = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(m.group(0 ) )] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(_lowerCamelCase )
return new_state_dict
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
__lowercase :List[Any] = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
__lowercase :str = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__lowercase :List[Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
__lowercase :int = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
__lowercase :Dict = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
__lowercase :List[str] = load_file(unet_path, device="cpu")
else:
__lowercase :int = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
__lowercase :Tuple = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
__lowercase :Union[str, Any] = load_file(vae_path, device="cpu")
else:
__lowercase :Union[str, Any] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
__lowercase :Union[str, Any] = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
__lowercase :Union[str, Any] = load_file(text_enc_path, device="cpu")
else:
__lowercase :Dict = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
__lowercase :Tuple = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
__lowercase :int = convert_unet_state_dict(unet_state_dict)
__lowercase :List[str] = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__lowercase :List[str] = convert_vae_state_dict(vae_state_dict)
__lowercase :List[str] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__lowercase :List[str] = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__lowercase :int = {"transformer." + k: v for k, v in text_enc_dict.items()}
__lowercase :Optional[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
__lowercase :List[Any] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
__lowercase :Dict = convert_text_enc_state_dict(text_enc_dict)
__lowercase :Tuple = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__lowercase :Optional[int] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__lowercase :Tuple = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__lowercase :Dict = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : int , a : int = 16 , a : int = 88 , a : Optional[int] = None , a : int = 1 , a : float = 0.0 , a : int = 32 , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[int] = None , a : str = "geglu" , a : Optional[int] = None , ) ->Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ : Dict = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=a , attention_head_dim=a , in_channels=a , num_layers=a , dropout=a , norm_num_groups=a , cross_attention_dim=a , attention_bias=a , sample_size=a , num_vector_embeds=a , activation_fn=a , num_embeds_ada_norm=a , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
SCREAMING_SNAKE_CASE__ : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
SCREAMING_SNAKE_CASE__ : Optional[Any] = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 0]
def A_ ( self : Dict , a : Optional[Any] , a : Union[str, Any] , a : int=None , a : Dict=None , a : Any=None , a : bool = True , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = hidden_states
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : List[str] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
SCREAMING_SNAKE_CASE__ : str = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
SCREAMING_SNAKE_CASE__ : str = self.transformer_index_for_condition[i]
SCREAMING_SNAKE_CASE__ : List[str] = self.transformers[transformer_index](
a , encoder_hidden_states=a , timestep=a , cross_attention_kwargs=a , return_dict=a , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
SCREAMING_SNAKE_CASE__ : str = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
SCREAMING_SNAKE_CASE__ : str = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=a )
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
import argparse
import os
import re
import packaging.version
__lowercase :int = "examples/"
__lowercase :List[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowercase :List[Any] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowercase :List[str] = "README.md"
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Any ):
'''simple docstring'''
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE__ : List[Any] = replace.replace("VERSION" , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="examples" )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Any=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE__ : str = "1. Want to contribute a new model?"
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE__ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE__ : str = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Dict = f.read()
SCREAMING_SNAKE_CASE__ : str = REPLACE_PATTERNS["init"][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optional[Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE__ : str = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE__ : List[str] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
SCREAMING_SNAKE_CASE__ : List[str] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE__ : List[str] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_lowerCamelCase ) == 0:
SCREAMING_SNAKE_CASE__ : int = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = get_version()
SCREAMING_SNAKE_CASE__ : List[Any] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
SCREAMING_SNAKE_CASE__ : Optional[int] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE__ : Tuple = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_lowerCamelCase ) == 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowercase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowercase :Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = None
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=0.9_9_9 , _lowerCamelCase : List[str]="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase : Optional[Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase : Optional[Any] ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
SCREAMING_SNAKE_CASE__ : int = []
for i in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE__ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class _a ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , a : int = 10_00 , a : str = "fixed_small_log" , a : bool = True , a : Optional[float] = 1.0 , a : str = "epsilon" , a : str = "squaredcos_cap_v2" , ) ->List[str]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
SCREAMING_SNAKE_CASE__ : List[str] = betas_for_alpha_bar(a )
SCREAMING_SNAKE_CASE__ : Dict = 1.0 - self.betas
SCREAMING_SNAKE_CASE__ : Dict = torch.cumprod(self.alphas , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1.0
# setable values
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Any = torch.from_numpy(np.arange(0 , a )[::-1].copy() )
SCREAMING_SNAKE_CASE__ : Tuple = variance_type
def A_ ( self : str , a : torch.FloatTensor , a : Optional[int] = None ) ->torch.FloatTensor:
return sample
def A_ ( self : Optional[Any] , a : int , a : Union[str, torch.device] = None ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_inference_steps
SCREAMING_SNAKE_CASE__ : int = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(np.intaa )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.from_numpy(a ).to(a )
def A_ ( self : List[str] , a : str , a : Optional[Any]=None , a : Any=None , a : Optional[Any]=None ) ->str:
if prev_timestep is None:
SCREAMING_SNAKE_CASE__ : int = t - 1
SCREAMING_SNAKE_CASE__ : int = self.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : Any = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.betas[t]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE__ : Optional[int] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.log(torch.clamp(a , min=1E-20 ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
SCREAMING_SNAKE_CASE__ : Optional[int] = variance.log()
SCREAMING_SNAKE_CASE__ : Optional[Any] = beta.log()
SCREAMING_SNAKE_CASE__ : Dict = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE__ : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def A_ ( self : Optional[int] , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : Optional[int] = None , a : Union[str, Any]=None , a : bool = True , ) ->Union[UnCLIPSchedulerOutput, Tuple]:
SCREAMING_SNAKE_CASE__ : Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = torch.split(a , sample.shape[1] , dim=1 )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
SCREAMING_SNAKE_CASE__ : Tuple = t - 1
SCREAMING_SNAKE_CASE__ : int = self.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
SCREAMING_SNAKE_CASE__ : int = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
SCREAMING_SNAKE_CASE__ : List[str] = self.betas[t]
SCREAMING_SNAKE_CASE__ : int = self.alphas[t]
else:
SCREAMING_SNAKE_CASE__ : Any = 1 - alpha_prod_t / alpha_prod_t_prev
SCREAMING_SNAKE_CASE__ : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__ : Optional[int] = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.clamp(
a , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__ : Tuple = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
SCREAMING_SNAKE_CASE__ : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
SCREAMING_SNAKE_CASE__ : List[str] = 0
if t > 0:
SCREAMING_SNAKE_CASE__ : List[str] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=a , device=model_output.device )
SCREAMING_SNAKE_CASE__ : int = self._get_variance(
a , predicted_variance=a , prev_timestep=a , )
if self.variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE__ : Optional[int] = variance
elif self.variance_type == "learned_range":
SCREAMING_SNAKE_CASE__ : List[str] = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
SCREAMING_SNAKE_CASE__ : Tuple = variance * variance_noise
SCREAMING_SNAKE_CASE__ : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=a , pred_original_sample=a )
def A_ ( self : List[Any] , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.IntTensor , ) ->torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
SCREAMING_SNAKE_CASE__ : Optional[Any] = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = alphas_cumprod[timesteps] ** 0.5
SCREAMING_SNAKE_CASE__ : Any = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = sqrt_alpha_prod.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : Any = (1 - alphas_cumprod[timesteps]) ** 0.5
SCREAMING_SNAKE_CASE__ : Any = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE__ : List[str] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
from statistics import mean, stdev
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int = 3 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = min(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = max(_lowerCamelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _lowerCamelCase ) for x in data]
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int = 3 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = mean(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = stdev(_lowerCamelCase )
# standardize data
return [round((x - mu) / (sigma) , _lowerCamelCase ) for x in data]
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Optional[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Dict = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : tuple[int, int] , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = position
SCREAMING_SNAKE_CASE__ : int = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for position in positions:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_lowerCamelCase )
return permissible_positions
def UpperCAmelCase ( _lowerCamelCase : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def UpperCAmelCase ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : int ):
'''simple docstring'''
if is_complete(_lowerCamelCase ):
return True
for position in get_valid_pos(_lowerCamelCase , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = position
if board[y][x] == 0:
SCREAMING_SNAKE_CASE__ : int = curr + 1
if open_knight_tour_helper(_lowerCamelCase , _lowerCamelCase , curr + 1 ):
return True
SCREAMING_SNAKE_CASE__ : List[str] = 0
return False
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = 1
if open_knight_tour_helper(_lowerCamelCase , (i, j) , 1 ):
return board
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Dict = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowercase :List[str] = Mapping[str, np.ndarray]
__lowercase :Any = Mapping[str, Any] # Is a nested dict.
__lowercase :List[Any] = 0.0_1
@dataclasses.dataclass(frozen=lowercase__ )
class _a :
"""simple docstring"""
snake_case_ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
snake_case_ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
snake_case_ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
snake_case_ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
snake_case_ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
snake_case_ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
snake_case_ = None
# Templates used to generate this protein (prediction-only)
snake_case_ = None
# Chain corresponding to each parent
snake_case_ = None
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = r"(\[[A-Z]+\]\n)"
SCREAMING_SNAKE_CASE__ : List[str] = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0]
SCREAMING_SNAKE_CASE__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
SCREAMING_SNAKE_CASE__ : List[str] = ["N", "CA", "C"]
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = None
for g in groups:
if "[PRIMARY]" == g[0]:
SCREAMING_SNAKE_CASE__ : List[str] = g[1][0].strip()
for i in range(len(_lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
SCREAMING_SNAKE_CASE__ : Dict = "X" # FIXME: strings are immutable
SCREAMING_SNAKE_CASE__ : Tuple = np.array(
[residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
SCREAMING_SNAKE_CASE__ : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
SCREAMING_SNAKE_CASE__ : str = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.zeros(
(
len(_lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , )
def UpperCAmelCase ( _lowerCamelCase : Protein , _lowerCamelCase : int = 0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : int = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
SCREAMING_SNAKE_CASE__ : str = prot.parents
SCREAMING_SNAKE_CASE__ : int = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [p for i, p in zip(_lowerCamelCase , _lowerCamelCase ) if i == chain_id]
if parents is None or len(_lowerCamelCase ) == 0:
SCREAMING_SNAKE_CASE__ : List[str] = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(_lowerCamelCase )}""" )
return pdb_headers
def UpperCAmelCase ( _lowerCamelCase : Protein , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : List[str] = pdb_str.split("\n" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
SCREAMING_SNAKE_CASE__ : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
SCREAMING_SNAKE_CASE__ : Any = []
if prot.parents_chain_index is not None:
SCREAMING_SNAKE_CASE__ : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowerCamelCase ) , [] )
parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = max([int(_lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
SCREAMING_SNAKE_CASE__ : Tuple = parent_dict.get(str(_lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(_lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [["N/A"]]
def make_parent_line(_lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(_lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
for i, l in enumerate(_lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parents_per_chain[chain_counter]
else:
SCREAMING_SNAKE_CASE__ : Dict = ["N/A"]
out_pdb_lines.append(make_parent_line(_lowerCamelCase ) )
return "\n".join(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Protein ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = residue_constants.restypes + ["X"]
def res_atoa(_lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
SCREAMING_SNAKE_CASE__ : str = residue_constants.atom_types
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Dict = prot.atom_mask
SCREAMING_SNAKE_CASE__ : Tuple = prot.aatype
SCREAMING_SNAKE_CASE__ : str = prot.atom_positions
SCREAMING_SNAKE_CASE__ : str = prot.residue_index.astype(np.intaa )
SCREAMING_SNAKE_CASE__ : Tuple = prot.b_factors
SCREAMING_SNAKE_CASE__ : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
SCREAMING_SNAKE_CASE__ : int = get_pdb_headers(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
pdb_lines.extend(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = aatype.shape[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = string.ascii_uppercase
SCREAMING_SNAKE_CASE__ : Dict = None
# Add all atom sites.
for i in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
SCREAMING_SNAKE_CASE__ : Optional[Any] = "ATOM"
SCREAMING_SNAKE_CASE__ : List[Any] = atom_name if len(_lowerCamelCase ) == 4 else f""" {atom_name}"""
SCREAMING_SNAKE_CASE__ : Tuple = ""
SCREAMING_SNAKE_CASE__ : Tuple = ""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1.0_0
SCREAMING_SNAKE_CASE__ : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
SCREAMING_SNAKE_CASE__ : List[str] = ""
SCREAMING_SNAKE_CASE__ : Any = "A"
if chain_index is not None:
SCREAMING_SNAKE_CASE__ : int = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
SCREAMING_SNAKE_CASE__ : Optional[int] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Any = chain_index[i + 1]
if should_terminate:
# Close the chain.
SCREAMING_SNAKE_CASE__ : Tuple = "TER"
SCREAMING_SNAKE_CASE__ : List[Any] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Protein ):
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCAmelCase ( _lowerCamelCase : FeatureDict , _lowerCamelCase : ModelOutput , _lowerCamelCase : Optional[np.ndarray] = None , _lowerCamelCase : Optional[np.ndarray] = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[Sequence[str]] = None , _lowerCamelCase : Optional[Sequence[int]] = None , ):
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , )
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
@property
def A_ ( self : Dict ) ->List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE__ : Any = False
return options
def A_ ( self : str ) ->str:
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE__ : Dict = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE__ : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : int = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE__ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE__ : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__ : str = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import cva
import numpy as np
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , a : float , a : int ) ->Optional[Any]:
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE__ : Tuple = k
SCREAMING_SNAKE_CASE__ : str = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : Optional[int] ) ->str:
return str(self.k )
def A_ ( self : Optional[int] , a : str ) ->tuple[cva.Mat, list[list[int]]]:
SCREAMING_SNAKE_CASE__ : List[Any] = cva.imread(a , 0 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = img.shape
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = img.copy()
SCREAMING_SNAKE_CASE__ : List[Any] = cva.cvtColor(a , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = np.gradient(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dx**2
SCREAMING_SNAKE_CASE__ : Any = dy**2
SCREAMING_SNAKE_CASE__ : str = dx * dy
SCREAMING_SNAKE_CASE__ : int = 0.04
SCREAMING_SNAKE_CASE__ : Optional[int] = self.window_size // 2
for y in range(a , h - offset ):
for x in range(a , w - offset ):
SCREAMING_SNAKE_CASE__ : Tuple = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE__ : int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE__ : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE__ : str = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE__ : List[str] = wxx + wyy
SCREAMING_SNAKE_CASE__ : Dict = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__lowercase :Optional[int] = HarrisCorner(0.0_4, 3)
__lowercase , __lowercase :Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : List[str] , a : Tuple=13 , a : Union[str, Any]=32 , a : str=3 , a : Dict=4 , a : List[Any]=[10, 20, 30, 40] , a : Optional[int]=[2, 2, 3, 2] , a : Union[str, Any]=True , a : List[Any]=True , a : str=37 , a : Union[str, Any]="gelu" , a : Any=10 , a : List[Any]=0.02 , a : Dict=["stage2", "stage3", "stage4"] , a : str=3 , a : List[Any]=None , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : Dict = num_stages
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_sizes
SCREAMING_SNAKE_CASE__ : Optional[int] = depths
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = out_features
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : Tuple = num_stages
def A_ ( self : Optional[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Dict = self.get_config()
return config, pixel_values, labels
def A_ ( self : Union[str, Any] ) ->int:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def A_ ( self : List[Any] ) ->Any:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=a , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def A_ ( self : str , a : Tuple , a : List[Any] , a : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = UperNetForSemanticSegmentation(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A_ ( self : Optional[int] ) ->str:
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
),
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case_ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = UperNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def A_ ( self : Dict ) ->Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : List[str] ) ->Optional[Any]:
return
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def A_ ( self : Optional[Any] ) ->str:
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def A_ ( self : Any ) ->str:
pass
@unittest.skip(reason="UperNet does not have a base model" )
def A_ ( self : Union[str, Any] ) ->List[Any]:
pass
@unittest.skip(reason="UperNet does not have a base model" )
def A_ ( self : Any ) ->Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A_ ( self : Dict ) ->List[str]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : str ) ->List[Any]:
pass
def A_ ( self : List[str] ) ->Tuple:
def check_hidden_states_output(a : Tuple , a : Any , a : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Dict = True
check_hidden_states_output(a , a , a )
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = _config_zero_init(a )
SCREAMING_SNAKE_CASE__ : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class(config=a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def A_ ( self : List[str] ) ->List[str]:
pass
@slow
def A_ ( self : Any ) ->List[str]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict = UperNetForSemanticSegmentation.from_pretrained(a )
self.assertIsNotNone(a )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(_lowerCamelCase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(a )
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : Dict = processor(images=a , return_tensors="pt" ).to(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**a )
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1E-4 ) )
def A_ ( self : Tuple ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : Dict = processor(images=a , return_tensors="pt" ).to(a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**a )
SCREAMING_SNAKE_CASE__ : Any = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1E-4 ) )
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.