"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : int = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_snake_case : Any = dict(zip(__A, range(len(__A ) ) ) )
_snake_case : Optional[int] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_snake_case : Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16_000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_snake_case : str = tempfile.mkdtemp()
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : List[str] = os.path.join(self.tmpdirname, __A )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
with open(self.feature_extraction_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
# load decoder from hub
_snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
_snake_case : Any = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **__A )
def UpperCamelCase_ ( self: List[str], **a_: Optional[Any] ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **__A )
def UpperCamelCase_ ( self: Optional[Any], **a_: str ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **__A )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer()
_snake_case : List[Any] = self.get_feature_extractor()
_snake_case : int = self.get_decoder()
_snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
processor.save_pretrained(self.tmpdirname )
_snake_case : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, __A )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__A, """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__A, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = self.get_feature_extractor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Union[str, Any] = floats_list((3, 1_000) )
_snake_case : str = feature_extractor(__A, return_tensors="""np""" )
_snake_case : Optional[Any] = processor(__A, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = self.get_feature_extractor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Dict = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Union[str, Any] = """This is a test string"""
_snake_case : Any = processor(text=__A )
_snake_case : Tuple = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def UpperCamelCase_ ( self: List[Any], a_: int=(2, 10, 16), a_: List[Any]=77 ):
'''simple docstring'''
np.random.seed(__A )
return np.random.rand(*__A )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_decoder()
_snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : str = self._get_dummy_logits(shape=(10, 16), seed=13 )
_snake_case : int = processor.decode(__A )
_snake_case : Tuple = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual("""</s> <s> </s>""", decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCamelCase_ ( self: List[Any], a_: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_snake_case : Tuple = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_snake_case : Optional[int] = processor.batch_decode(__A, __A )
_snake_case : Optional[Any] = list(__A )
with get_context("""fork""" ).Pool() as p:
_snake_case : Optional[Any] = decoder.decode_beams_batch(__A, __A )
_snake_case , _snake_case , _snake_case : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A, decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text )
self.assertListEqual(__A, decoded_processor.logit_score )
self.assertListEqual(__A, decoded_processor.lm_score )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[int] = self.get_decoder()
_snake_case : Tuple = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : str = self._get_dummy_logits()
_snake_case : int = 15
_snake_case : List[str] = -20.0
_snake_case : Union[str, Any] = -4.0
_snake_case : str = processor.batch_decode(
__A, beam_width=__A, beam_prune_logp=__A, token_min_logp=__A, )
_snake_case : List[Any] = decoded_processor_out.text
_snake_case : Union[str, Any] = list(__A )
with get_context("""fork""" ).Pool() as pool:
_snake_case : Union[str, Any] = decoder.decode_beams_batch(
__A, __A, beam_width=__A, beam_prune_logp=__A, token_min_logp=__A, )
_snake_case : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
_snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
_snake_case : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A, __A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""], __A )
self.assertTrue(np.array_equal(__A, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], __A, atol=1E-3 ) )
self.assertTrue(np.array_equal(__A, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474], __A, atol=1E-3 ) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[int] = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : int = self._get_dummy_logits()
_snake_case : Optional[int] = 2.0
_snake_case : str = 5.0
_snake_case : List[str] = -20.0
_snake_case : Optional[Any] = True
_snake_case : Optional[int] = processor.batch_decode(
__A, alpha=__A, beta=__A, unk_score_offset=__A, lm_score_boundary=__A, )
_snake_case : Union[str, Any] = decoded_processor_out.text
_snake_case : List[str] = list(__A )
decoder.reset_params(
alpha=__A, beta=__A, unk_score_offset=__A, lm_score_boundary=__A, )
with get_context("""fork""" ).Pool() as pool:
_snake_case : Optional[int] = decoder.decode_beams_batch(
__A, __A, )
_snake_case : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A, __A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""], __A )
_snake_case : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, __A )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : Dict = os.listdir(__A )
_snake_case : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A, __A )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[Any] = WavaVecaProcessorWithLM.from_pretrained(__A )
_snake_case : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : Union[str, Any] = os.listdir(__A )
_snake_case : Tuple = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A, __A )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Union[str, Any] = floats_list((3, 1_000) )
_snake_case : Optional[int] = processor_wavaveca(__A, return_tensors="""np""" )
_snake_case : Optional[int] = processor_auto(__A, return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 )
_snake_case : int = self._get_dummy_logits()
_snake_case : Optional[int] = processor_wavaveca.batch_decode(__A )
_snake_case : Optional[Any] = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
@staticmethod
def UpperCamelCase_ ( a_: Any, a_: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[str] = self._get_dummy_logits()[0]
_snake_case : int = processor.decode(__A, output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A, __A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""], """word""" ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """end_offset""" ), [1, 3, 5] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Tuple = self._get_dummy_logits()
_snake_case : Dict = processor.batch_decode(__A, output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A, __A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__A, """word""" ) ) for o in outputs["""word_offsets"""]], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """end_offset""" ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
import torch
_snake_case : Tuple = load_dataset("""common_voice""", """en""", split="""train""", streaming=__A )
_snake_case : Optional[Any] = ds.cast_column("""audio""", datasets.Audio(sampling_rate=16_000 ) )
_snake_case : int = iter(__A )
_snake_case : Union[str, Any] = next(__A )
_snake_case : Union[str, Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_snake_case : Union[str, Any] = processor(sample["""audio"""]["""array"""], return_tensors="""pt""" ).input_values
with torch.no_grad():
_snake_case : Any = model(__A ).logits.cpu().numpy()
_snake_case : Tuple = processor.decode(logits[0], output_word_offsets=__A )
_snake_case : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_snake_case : List[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_snake_case : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__A, """word""" ) ), __A )
self.assertEqual(""" """.join(self.get_from_offsets(__A, """word""" ) ), output.text )
# output times
_snake_case : List[str] = torch.tensor(self.get_from_offsets(__A, """start_time""" ) )
_snake_case : str = torch.tensor(self.get_from_offsets(__A, """end_time""" ) )
# fmt: off
_snake_case : List[Any] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_snake_case : Dict = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A, __A, atol=0.01 ) )
self.assertTrue(torch.allclose(__A, __A, atol=0.01 ) )
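# --- Illustrative sketch (not part of the upstream test suite) ---
# The flow the tests above exercise, in one place: load a processor whose
# decoder carries an n-gram language model, then beam-search-decode CTC
# logits. The logits here are random (same shape as `_get_dummy_logits`), so
# the "transcript" is meaningless; the repo id is the one used in the tests.
def _example_lm_boosted_decoding():
    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) stand-in for Wav2Vec2ForCTC output
    return processor.batch_decode(logits).text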
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
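# --- Illustrative check (not part of the Project Euler solution above) ---
# XOR with the same key is its own inverse, which is what `try_key` relies on:
# encrypting a known plaintext with a three-letter key and decoding with the
# same key must give the plaintext back. The values here are made up.
def _demo_xor_round_trip() -> None:
    key = (ord("a"), ord("b"), ord("c"))
    plaintext = "the quick brown fox"
    ciphertext = [ord(char) ^ keychar for char, keychar in zip(plaintext, cycle(key))]
    assert try_key(ciphertext, key) == plaintext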
"""Tests for the BARThez tokenizer."""
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
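# --- Illustrative sketch (not part of the upstream test suite) ---
# What `test_prepare_batch` asserts, written out: BARThez wraps a sequence in
# <s> (id 0) ... </s> (id 2), so truncating to 6 tokens yields the expected ids.
def _example_barthez_ids():
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
    ids = tokenizer("A long paragraph for summarization.", max_length=6, truncation=True)["input_ids"]
    assert ids == [0, 57, 3_018, 70_307, 91, 2]
    return ids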
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
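# --- Illustrative usage sketch (assumptions: the public "ZinengTang/tvlt-base"
# checkpoint and random stand-in video/audio arrays; not part of the module) ---
# The processor returns one merged dict: audio keys first, then image keys,
# then mixed-image keys, exactly as `__call__` combines them above.
def _example_tvlt_inputs():
    import numpy as np

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    audio = np.random.randn(10_000)
    images = np.random.randn(8, 3, 224, 224)  # (num_frames, channels, height, width)
    return processor(images=images, audio=audio, sampling_rate=44_100, return_tensors="pt")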
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = '''table-transformer'''
lowercase__ = ['''past_key_values''']
lowercase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self: int, a_: str=True, a_: List[Any]=None, a_: List[str]=3, a_: Optional[Any]=100, a_: Optional[int]=6, a_: Optional[Any]=2_048, a_: str=8, a_: Any=6, a_: Dict=2_048, a_: Any=8, a_: Union[str, Any]=0.0, a_: Optional[Any]=0.0, a_: List[str]=True, a_: Union[str, Any]="relu", a_: Union[str, Any]=256, a_: Union[str, Any]=0.1, a_: str=0.0, a_: Optional[Any]=0.0, a_: Tuple=0.02, a_: str=1.0, a_: Dict=False, a_: Dict="sine", a_: Tuple="resnet50", a_: int=True, a_: Dict=False, a_: Tuple=1, a_: Union[str, Any]=5, a_: int=2, a_: Optional[int]=1, a_: Tuple=1, a_: str=5, a_: str=2, a_: Optional[Any]=0.1, **a_: Any, ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_snake_case : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(A__, A__ ):
_snake_case : Any = backbone_config.get("""model_type""" )
_snake_case : int = CONFIG_MAPPING[backbone_model_type]
_snake_case : str = config_class.from_dict(A__ )
# set timm attributes to None
_snake_case : Any = None, None, None
_snake_case : Optional[int] = use_timm_backbone
_snake_case : List[str] = backbone_config
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = num_queries
_snake_case : Optional[int] = d_model
_snake_case : Union[str, Any] = encoder_ffn_dim
_snake_case : str = encoder_layers
_snake_case : List[Any] = encoder_attention_heads
_snake_case : str = decoder_ffn_dim
_snake_case : Any = decoder_layers
_snake_case : Tuple = decoder_attention_heads
_snake_case : List[Any] = dropout
_snake_case : int = attention_dropout
_snake_case : Any = activation_dropout
_snake_case : Tuple = activation_function
_snake_case : Tuple = init_std
_snake_case : Union[str, Any] = init_xavier_std
_snake_case : int = encoder_layerdrop
_snake_case : List[str] = decoder_layerdrop
_snake_case : Any = encoder_layers
_snake_case : List[str] = auxiliary_loss
_snake_case : List[str] = position_embedding_type
_snake_case : Tuple = backbone
_snake_case : List[Any] = use_pretrained_backbone
_snake_case : List[Any] = dilation
# Hungarian matcher
_snake_case : Tuple = class_cost
_snake_case : Union[str, Any] = bbox_cost
_snake_case : List[Any] = giou_cost
# Loss coefficients
_snake_case : Any = mask_loss_coefficient
_snake_case : int = dice_loss_coefficient
_snake_case : Union[str, Any] = bbox_loss_coefficient
_snake_case : str = giou_loss_coefficient
_snake_case : int = eos_coefficient
super().__init__(is_encoder_decoder=A__, **A__ )
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self.d_model
class lowercase( __a ):
'''simple docstring'''
lowercase__ = version.parse("1.11" )
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return 1E-5
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return 12
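# --- Illustrative usage sketch (not part of the upstream module) ---
# The usual config -> model pattern, and how `attribute_map` routes the
# standard attribute names onto the DETR-style ones defined above:
#
#   from transformers import TableTransformerConfig, TableTransformerModel
#
#   config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
#   model = TableTransformerModel(config)       # randomly initialized
#   assert config.hidden_size == 128            # mapped to d_model
#   assert config.num_attention_heads == 4      # mapped to encoder_attention_heads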
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
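# --- Illustrative note (not part of the upstream test suite) ---
# Where the magic numbers above come from: ByT5 maps each utf-8 byte to
# id = byte value + 3, reserving id 0 for <pad>, 1 for </s> and 2 for <unk>.
def _example_byte_to_id() -> None:
    assert ord("U") + 3 == 88  # first id of "Unicode €." in test_multibytes_char
    assert ord("A") + 3 == 68  # first id in the summarization batches
    assert ord(".") + 3 == 49  # the "." right before the closing </s> (id 1)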
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase:
lowercase__ = 42
lowercase__ = 42
class lowercase:
def __init__( self: str, a_: Optional[int] ):
'''simple docstring'''
_snake_case : str = [[] for _ in range(_a )]
_snake_case : Dict = size
def __getitem__( self: str, a_: Tuple ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self._size
def UpperCamelCase_ ( self: Optional[Any], a_: Dict, a_: Dict, a_: List[Any] ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a, _a ) )
def UpperCamelCase_ ( self: str, a_: Optional[int], a_: Dict ):
'''simple docstring'''
_snake_case : List[str] = deque([start_vertex] )
_snake_case : int = [None] * self.size
_snake_case : Optional[int] = 0
while queue:
_snake_case : Union[str, Any] = queue.popleft()
_snake_case : Optional[Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_snake_case : str = current_distance + edge.weight
_snake_case : List[Any] = distances[edge.destination_vertex]
if (
isinstance(_a, _a )
and new_distance >= dest_vertex_distance
):
continue
_snake_case : Union[str, Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
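# --- Illustrative usage (hypothetical graph, not from the module above) ---
# Weight-0 edges are "free", so the shortest 0 -> 3 path costs just the single
# weight-1 hop.
def _demo_zero_one_bfs() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 0)
    assert g.get_shortest_path(0, 3) == 1  # 0 -(0)-> 1 -(1)-> 2 -(0)-> 3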
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
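# --- Illustrative subclass (hypothetical, not shipped with transformers) ---
# A concrete command registers its own subparser and sets `func` so a CLI
# dispatcher can instantiate and run it.
class _EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # In the transformers CLI, `parser` is the subparsers action returned
        # by ArgumentParser.add_subparsers(), hence `add_parser(...)`.
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: _EchoCommand(args.text))

    def __init__(self, text: str):
        self._text = text

    def run(self):
        print(self._text)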
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ = logging.getLogger(__name__)
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase__ = field(
default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ = field(
default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowercase__ = field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
lowercase__ = field(metadata={"help": "Should contain the data files for the task."} )
lowercase__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase__ = field(
default=__a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_snake_case , _snake_case , _snake_case : Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
try:
_snake_case : Any = processors[data_args.task_name]()
_snake_case : int = processor.get_labels()
_snake_case : str = len(_lowerCamelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
_snake_case : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_snake_case : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(snake_case__ : EvalPrediction ) -> Dict:
_snake_case : Union[str, Any] = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowerCamelCase , p.label_ids )}
# Data collator
_snake_case : Tuple = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_snake_case : Tuple = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case : Tuple = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_snake_case : Dict = trainer.evaluate()
_snake_case : List[Any] = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(_lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , _lowerCamelCase , _lowerCamelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_lowerCamelCase )
return results
def UpperCAmelCase__ (snake_case__ : Tuple ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
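# --- Illustrative invocation (hypothetical paths; the flags come from the
# dataclasses above plus the standard TrainingArguments) ---
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --do_train --do_eval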
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
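# --- Illustrative usage sketch (not part of the upstream module) ---
# Instantiating a randomly initialized RoFormer from default-sized config:
#
#   from transformers import RoFormerConfig, RoFormerModel
#
#   config = RoFormerConfig()
#   model = RoFormerModel(config)
#   assert config.embedding_size == config.hidden_size  # defaults to hidden_size when unset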
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase( TrainingArguments ):
    '''simple docstring'''
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                d[k] = v.to_dict()
        return d
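# Minimal usage sketch (requires a writable `output_dir`; the field names mirror
# the dataclass fields defined above):
#
#   args = lowercase(output_dir="out", predict_with_generate=True, generation_num_beams=4)
#   assert args.to_dict()["generation_num_beams"] == 4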
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
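# e.g. shave_segments("input_blocks.1.0.weight", 1)  -> "1.0.weight"
#      shave_segments("input_blocks.1.0.weight", -1) -> "input_blocks.1.0"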
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
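# e.g. renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"]) returns
#      [{"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}]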
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Assigns renamed weights to the new checkpoint, splitting fused qkv attention tensors when needed."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Converts an original LDM UNet state dict into the diffusers UNet2DModel layout."""
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
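# Example invocation (hypothetical paths):
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted-ldm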
| 28 | 0 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_12,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        """Parses a CLI string into a bool; only 'True' or 'False' are accepted."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
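    # e.g. parse_bool("True") -> True, parse_bool("False") -> False;
    # any other input (including "true"/"false") raises ValueError.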
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
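# Example invocation (hypothetical script name and paths; flags mirror the argparse
# options defined above):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors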
| 701 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Returns the mode(s) of input_list, sorted; an empty input yields []."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    y = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == y})
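# e.g. mode([2, 2, 3]) -> [2]; mode([1, 1, 2, 2]) -> [1, 2]; mode([]) -> []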
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_vision_model"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def UpperCamelCase_ ( cls, pretrained_model_name_or_path, **kwargs ):
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        # get the vision config dict if we are loading from the composite BridgeTower config
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_text_model"
    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def UpperCamelCase_ ( cls, pretrained_model_name_or_path, **kwargs ):
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        # get the text config dict if we are loading from the composite BridgeTower config
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower"
    def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        '''simple docstring'''
        # `text_config_dict`/`vision_config_dict` are accepted but unused here
        kwargs.pop("""text_config_dict""", None )
        kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def UpperCamelCase_ ( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
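# Minimal usage sketch: compose the full config from its two sub-configs and
# round-trip it through to_dict() (all three classes are defined above):
#
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   cfg = BridgeTowerConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())
#   assert cfg.to_dict()["model_type"] == "bridgetower"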
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Helper for Newton's forward interpolation: u * (u-1) * ... * (u-p+1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main():
    n = int(input("""enter the numbers of values: """ ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    summ = 0
    print("""enter the values of parameters in a list: """ )
    x = list(map(int, input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
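# Worked example (interactive session): for x = [0, 1, 2, 3] with first-column
# values y = [1, 2, 5, 10], the forward differences are 1, 3, 5 / 2, 2 / 0.
# Interpolating at value = 2 gives u = 2.0 and
# summ = 1 + ucal(2, 1)*1/1! + ucal(2, 2)*2/2! + 0 = 1 + 2 + 2 = 5.0.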
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """simple docstring"""
    image_size = 3_64 if "coco" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print("""Done!""" )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"nielsr/{model_name}" )
        hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
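# Example invocation (hypothetical script name and paths):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub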
| 28 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""" ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
"""simple docstring"""
print("""Loading config file...""" )
    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
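    # e.g. flatten_yaml_as_dict({"a": {"b": 1}, "c": 2}) -> {"a.b": 1, "c": 2}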
    config = argparse.Namespace()
    with open(config_path, """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config, k, v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path, str(exc ) ) )
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 2_10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 1_51
        config.image_size = 5_12
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 5_12
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """simple docstring"""
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if F"layer_{i}." in k:
                k_new = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if F"layer_{i}.0." in k:
                k_new = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if F"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if F"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if F"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if F"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict):
    """Removes keys that have no counterpart in the HF model (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """simple docstring"""
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
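# Example invocation (hypothetical script name and paths):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0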
| 28 | 0 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    """simple docstring"""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 705 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
        backend = find_backend("""    if not is_torch_available():""" )
        self.assertEqual(backend, """torch""" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        backend = find_backend("""    if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(backend, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        backend = find_backend(
            """    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(backend, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""torch_and_transformers""", objects )
        self.assertIn("""flax_and_transformers""", objects )
        self.assertIn("""torch_and_transformers_and_onnx""", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_dummy_class )
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
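
# A minimal, self-contained sketch of the backend-guard pattern the dummy
# files above rely on. `requires_backends` and `DummyObject` here are
# simplified stand-ins for illustration, not the real diffusers helpers.
def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {backends}")


class DummyObject(type):
    # any attribute access on the dummy class fails loudly instead of
    # surfacing as a confusing AttributeError later
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])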
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
                 mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # split pieces like "9," so the digits and the trailing comma are tokenized separately
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None,
                spaces_between_special_tokens=True, **kwargs):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
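
# A small, dependency-free sketch of the pair layout the three helpers above
# produce: `A <sep> B <sep> <cls>`, with token_type_ids 0 for the first
# segment, 1 for the second, and 2 for the trailing <cls>. The ids below are
# placeholders chosen for illustration only.
if __name__ == "__main__":
    seq_a = [11, 12, 13]            # hypothetical ids for sentence A
    seq_b = [21, 22]                # hypothetical ids for sentence B
    sep, cls = [900], [901]         # placeholder sep/cls ids
    input_ids = seq_a + sep + seq_b + sep + cls
    token_type_ids = len(seq_a + sep) * [0] + len(seq_b + sep) * [1] + [2]
    assert len(input_ids) == len(token_type_ids) == 8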
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
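
# A rough, self-contained sketch of the lazy-import idea behind `_LazyModule`,
# using the module-level __getattr__ hook from PEP 562. This illustrates the
# pattern only; it is not the actual transformers implementation.
#
#     import importlib
#
#     _demo_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
#
#     def __getattr__(name):
#         for module_name, exported in _demo_structure.items():
#             if name in exported:
#                 module = importlib.import_module(module_name)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")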
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the
    cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
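
# Illustrative sanity check for the schedule above (runs only when executed
# directly): with alpha_bar decreasing in t, every beta is positive and
# capped at max_beta, with one entry per diffusion timestep.
if __name__ == "__main__":
    _demo_betas = betas_for_alpha_bar(10)
    assert _demo_betas.shape == (10,)
    assert 0.0 < float(_demo_betas.min()) and float(_demo_betas.max()) <= 0.999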
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,
                 beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon",
                 timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
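
# A minimal sketch of the sigma-broadcasting trick used in `add_noise` above:
# a per-sample sigma vector is unsqueezed until it broadcasts against a
# (batch, channels, height, width) tensor. Shapes are illustrative only.
def _demo_broadcast_sigma():
    x0 = torch.randn(2, 3, 8, 8)
    noise = torch.randn_like(x0)
    sigma = torch.tensor([0.5, 1.0])       # one sigma per batch element
    while len(sigma.shape) < len(x0.shape):
        sigma = sigma.unsqueeze(-1)        # -> shape (2, 1, 1, 1)
    return x0 + noise * sigma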
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
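
# A dependency-free sketch of the decorator-factory shape used above: the
# outer call freezes the configuration, and the inner `run_func` receives the
# benchmarked function and picks one of two wrappers. Names are illustrative.
def run_with_mode(verbose: bool):
    def run_func(func):
        @wraps(func)
        def noisy(*args, **kwargs):
            print(f"calling {func.__name__}")
            return func(*args, **kwargs)

        return noisy if verbose else func

    return run_func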
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
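
# Quick usage sketch for the helper above; guarded so it only runs when
# executed directly and TensorFlow is actually available in the environment.
if is_tf_available() and __name__ == "__main__":
    _demo_ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)
    assert _demo_ids.shape == (2, 8)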
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run 5 additional times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007,
                 s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev,
                     derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
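
# A numpy sketch of the stochastic "churn" in `add_noise_to_input` above:
# sigma is briefly raised to sigma_hat = sigma * (1 + gamma), and matching
# Gaussian noise is injected so the sample sits at the new noise level.
# s_noise = 1.007 mirrors the config default; other values are illustrative.
import numpy as np

def _demo_churn(sample, sigma, gamma=0.1, seed=0):
    rng = np.random.default_rng(seed)
    eps = 1.007 * rng.standard_normal(sample.shape)
    sigma_hat = sigma + gamma * sigma
    return sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps, sigma_hat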
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer
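
# Worked example: making 987 from the standard denominations below. The
# greedy pass takes 500, then four 100s, then 50, 20, 10, 5 and 2.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]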
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, left, right, key):
    """Binary search in v[left+1 .. right] for the index of the first element >= key."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right
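
# Example: in [2, 5, 7, 11] the ceiling of 6 sits at index 2 (value 7).
assert ceil_index([2, 5, 7, 11], -1, 3, 6) == 2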
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element, keeping tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
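
# Worked example: the longest increasing subsequence of
# [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], so the length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6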
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None,
                 out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
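
# A sketch of what `post_process_semantic_segmentation` is expected to do in
# the test above, assuming the usual recipe: bilinearly upsample the logits to
# the target size, then argmax over the class dimension. Shapes illustrative.
def _demo_postprocess(logits, target_size=(500, 300)):
    upsampled = torch.nn.functional.interpolate(
        logits, size=target_size, mode="bilinear", align_corners=False
    )
    return upsampled.argmax(dim=1)  # (batch, height, width) label map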
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1,
                 initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute",
                 block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0,
                 initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
                new_scheduler.set_timesteps(a_ )
                # copy over dummy past residuals (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
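# Hedged usage sketch (not part of the test): outside the test harness the
# scheduler is typically driven like this, assuming `model` returns the residual
# for the current sample and timestep:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample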
| 28 | 0 |
"""simple docstring"""
import json
import sys
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
with open(__snake_case , encoding="""utf-8""" ) as f:
_snake_case : Tuple = json.load(__snake_case )
_snake_case : Any = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(__snake_case ):
_snake_case : int = results[benchmark_name]
_snake_case : int = benchmark_name.split("""/""" )[-1]
output_md.append(F"### Benchmark: {benchmark_file_name}" )
_snake_case : Optional[Any] = """| metric |"""
_snake_case : str = """|--------|"""
_snake_case : int = """| new / old (diff) |"""
for metric_name in sorted(__snake_case ):
_snake_case : Dict = benchmark_res[metric_name]
_snake_case : Tuple = metric_vals["""new"""]
_snake_case : Optional[int] = metric_vals.get("""old""" , __snake_case )
_snake_case : Optional[int] = metric_vals.get("""diff""" , __snake_case )
_snake_case : List[str] = F" {new_val:f}" if isinstance(__snake_case , (int, float) ) else """None"""
if old_val is not None:
val_str += F" / {old_val:f}" if isinstance(__snake_case , (int, float) ) else "None"
if dif_val is not None:
val_str += F" ({dif_val:f})" if isinstance(__snake_case , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(__snake_case ) )
if __name__ == "__main__":
A_ = sys.argv[1]
A_ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
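# Hedged illustration of the emitted markdown: for a hypothetical entry
# {"load_time": {"new": 1.23, "old": 1.20, "diff": 0.03}} the three rows built
# above come out as:
#
#   | metric | load_time |
#   |--------|---|
#   | new / old (diff) | 1.230000 / 1.200000 (0.030000) |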
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
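# Quick sanity check of the 6k +/- 1 trial division above (known values):
#   is_prime(97) -> True, is_prime(91) -> False (91 = 7 * 13)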
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ = logging.getLogger(__name__)
A_ = '''Hello world! cécé herlolip'''
A_ = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = BertAbsConfig(
temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
_snake_case : int = torch.load(lowerCamelCase_ , lambda snake_case__ , snake_case__ : storage )
_snake_case : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ )
original.eval()
_snake_case : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
_snake_case : int = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
_snake_case : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
_snake_case : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
_snake_case : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
_snake_case : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_snake_case : Optional[int] = encoder_input_ids
_snake_case : Optional[Any] = decoder_input_ids
_snake_case : List[str] = None
_snake_case : Tuple = None
_snake_case : int = None
_snake_case : List[Any] = None
_snake_case : Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_snake_case : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : Optional[Any] = original.generator(lowerCamelCase_ )
_snake_case : List[Any] = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : str = new_model.generator(lowerCamelCase_ )
_snake_case : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
_snake_case : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
_snake_case : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
A_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
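# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_bertabs_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted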
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowercase( __lowerCamelCase ):
'''simple docstring'''
def __init__( self: Union[str, Any], a_: Any, a_: List[Any]=13, a_: Union[str, Any]=7, a_: List[Any]=True, a_: List[str]=True, a_: Tuple=False, a_: Optional[int]=True, a_: int=99, a_: Dict=32, a_: Dict=5, a_: Union[str, Any]=4, a_: str=37, a_: Optional[int]="gelu", a_: Tuple=0.1, a_: Optional[Any]=0.1, a_: Tuple=512, a_: List[Any]=16, a_: Dict=2, a_: int=0.02, a_: List[Any]=3, a_: Optional[int]=4, a_: str=None, ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : int = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Tuple = use_input_mask
_snake_case : Optional[Any] = use_token_type_ids
_snake_case : Optional[Any] = use_labels
_snake_case : Optional[Any] = vocab_size
_snake_case : Union[str, Any] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Any = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : List[str] = type_sequence_label_size
_snake_case : Dict = initializer_range
_snake_case : List[str] = num_labels
_snake_case : Any = num_choices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_snake_case : Union[str, Any] = None
if self.use_input_mask:
_snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[Any] = None
_snake_case : Optional[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_snake_case : Any = ids_tensor([self.batch_size], self.num_choices )
_snake_case : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: List[Any], a_: int, a_: Optional[Any], a_: Any, a_: Tuple ):
'''simple docstring'''
_snake_case : Dict = DistilBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : Optional[Any] = model(UpperCamelCase_, UpperCamelCase_ )
_snake_case : Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: Any, a_: Optional[Any], a_: List[Any], a_: str, a_: Optional[int], a_: Any, a_: Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = DistilBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : List[str] = model(UpperCamelCase_, attention_mask=UpperCamelCase_, labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: Dict, a_: Any, a_: Any, a_: Optional[Any], a_: str ):
'''simple docstring'''
_snake_case : int = DistilBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : Tuple = model(
UpperCamelCase_, attention_mask=UpperCamelCase_, start_positions=UpperCamelCase_, end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, Any], a_: Tuple, a_: List[Any], a_: Any, a_: Tuple, a_: Optional[int] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Optional[int] = DistilBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : str = model(UpperCamelCase_, attention_mask=UpperCamelCase_, labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: str, a_: Optional[int], a_: Union[str, Any], a_: Optional[int], a_: Dict, a_: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.num_labels
_snake_case : Optional[Any] = DistilBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : int = model(UpperCamelCase_, attention_mask=UpperCamelCase_, labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: str, a_: Optional[int], a_: Union[str, Any], a_: List[str], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.num_choices
_snake_case : Any = DistilBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_snake_case : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
_snake_case : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
_snake_case : str = model(
UpperCamelCase_, attention_mask=UpperCamelCase_, labels=UpperCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : Union[str, Any] = config_and_inputs
_snake_case : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase__ = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = True
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = DistilBertModelTester(self )
_snake_case : Any = ConfigTester(self, config_class=UpperCamelCase_, dim=37 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCamelCase_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase_ )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = DistilBertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_snake_case : List[Any] = True
_snake_case : Optional[Any] = model_class(config=UpperCamelCase_ )
_snake_case : Union[str, Any] = self._prepare_for_class(UpperCamelCase_, UpperCamelCase_ )
_snake_case : Any = torch.jit.trace(
UpperCamelCase_, (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_, os.path.join(UpperCamelCase_, """traced_model.pt""" ) )
_snake_case : Union[str, Any] = torch.jit.load(os.path.join(UpperCamelCase_, """traced_model.pt""" ), map_location=UpperCamelCase_ )
loaded(inputs_dict["""input_ids"""].to(UpperCamelCase_ ), inputs_dict["""attention_mask"""].to(UpperCamelCase_ ) )
@require_torch
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_snake_case : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_snake_case : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case : Optional[int] = model(UpperCamelCase_, attention_mask=UpperCamelCase_ )[0]
_snake_case : Optional[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, UpperCamelCase_ )
_snake_case : str = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], UpperCamelCase_, atol=1E-4 ) )
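# Hedged sketch of the same check as a standalone snippet, assuming Hub access and
# the `input_ids` / `attention_mask` tensors defined in the test above:
#
#   model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#   with torch.no_grad():
#       hidden = model(input_ids, attention_mask=attention_mask)[0]  # (1, 11, 768)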
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
        # Clearing the lowest set bit (Brian Kernighan's trick) jumps straight to
        # the next 1 bit, so the loop runs once per set bit rather than once per
        # bit position.
number &= number - 1
count += 1
return count
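# Worked example of the clear-lowest-set-bit trick:
#   number = 0b1011 (11) -> 0b1010 -> 0b1000 -> 0b0000, so count == 3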
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class lowercase( _UpperCamelCase ):
'''simple docstring'''
lowercase__ = ["input_features"]
def __init__( self: Dict, a_: int=80, a_: Union[str, Any]=16_000, a_: Optional[int]=160, a_: Optional[Any]=30, a_: Dict=400, a_: Optional[Any]=0.0, a_: int=False, **a_: Tuple, ):
'''simple docstring'''
super().__init__(
feature_size=a_, sampling_rate=a_, padding_value=a_, return_attention_mask=a_, **a_, )
_snake_case : List[str] = n_fft
_snake_case : Any = hop_length
_snake_case : Dict = chunk_length
_snake_case : Union[str, Any] = chunk_length * sampling_rate
_snake_case : Any = self.n_samples // hop_length
_snake_case : List[Any] = sampling_rate
_snake_case : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=a_, min_frequency=0.0, max_frequency=8_000.0, sampling_rate=a_, norm="""slaney""", mel_scale="""slaney""", )
def UpperCamelCase_ ( self: Optional[Any], a_: np.array ):
'''simple docstring'''
_snake_case : Tuple = spectrogram(
a_, window_function(self.n_fft, """hann""" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="""log10""", )
_snake_case : Tuple = log_spec[:, :-1]
_snake_case : Any = np.maximum(a_, log_spec.max() - 8.0 )
_snake_case : Optional[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase_ ( a_: List[np.ndarray], a_: List[np.ndarray], a_: float = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
_snake_case : List[str] = np.array(a_, np.intaa )
_snake_case : List[str] = []
for vector, length in zip(a_, attention_mask.sum(-1 ) ):
_snake_case : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_snake_case : List[Any] = padding_value
normed_input_values.append(a_ )
else:
_snake_case : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self: str, a_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_: bool = True, a_: Optional[int] = None, a_: Optional[Union[str, TensorType]] = None, a_: Optional[bool] = None, a_: Optional[str] = "max_length", a_: Optional[int] = None, a_: Optional[int] = None, a_: Optional[bool] = None, **a_: List[Any], ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_snake_case : str = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_snake_case : Any = is_batched_numpy or (
isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_, np.ndarray ):
_snake_case : Tuple = np.asarray(a_, dtype=np.floataa )
elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_snake_case : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_snake_case : Optional[Any] = [np.asarray([raw_speech] ).T]
_snake_case : List[str] = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
_snake_case : List[Any] = self.pad(
a_, padding=a_, max_length=max_length if max_length else self.n_samples, truncation=a_, pad_to_multiple_of=a_, return_attention_mask=return_attention_mask or do_normalize, )
# zero-mean and unit-variance normalization
if do_normalize:
_snake_case : Dict = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""], attention_mask=padded_inputs["""attention_mask"""], padding_value=self.padding_value, )
_snake_case : Dict = np.stack(padded_inputs["""input_features"""], axis=0 )
# make sure list is in array format
_snake_case : Tuple = padded_inputs.get("""input_features""" ).transpose(2, 0, 1 )
_snake_case : str = [self._np_extract_fbank_features(a_ ) for waveform in input_features[0]]
if isinstance(input_features[0], a_ ):
_snake_case : Optional[int] = [np.asarray(a_, dtype=np.floataa ) for feature in input_features]
else:
_snake_case : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_snake_case : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_snake_case : str = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = copy.deepcopy(self.__dict__ )
_snake_case : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
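# Hedged usage sketch (argument names as in the de-obfuscated Whisper-style
# original, WhisperFeatureExtractor), assuming a mono waveform sampled at 16 kHz:
#
#   extractor = WhisperFeatureExtractor(feature_size=80, sampling_rate=16_000)
#   feats = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape   # -> (1, 80, 3000) for a 30 s window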
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
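# Hedged sketch of the equivalent standalone inference, assuming Hub access and a
# PIL image named `image`:
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits  # (1, 1000)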
| 28 | 0 |
def UpperCAmelCase__ (snake_case__ : Union[str, Any] = 50 ):
"""simple docstring"""
_snake_case : List[str] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
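# Why the recurrence works: the constant 1 counts the all-unit-cell row, and each
# (tile_length, tile_start) pair fixes the leftmost tile, with unit cells before
# it and an independent suffix of length row_length - tile_start - tile_length.
# Hand check: a row of length 3 admits BBB, 2B, B2, and 3, so the function above
# returns 4 for length 3.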
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset , tmp_path ):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path ):
    """simple docstring"""
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature , expected ):
    """simple docstring"""
    assert get_writer_batch_size(feature ) == expected
| 28 | 0 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule ):
    '''simple docstring'''
    def __init__( self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None, ):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self, step ):
        '''simple docstring'''
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power )
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps ), name=name, )
    def get_config( self ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
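# Illustrative usage of the schedule above (a minimal sketch; the step counts and
# learning rate are made-up values, not taken from this file):
#
#   decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
#       initial_learning_rate=1e-4, decay_steps=9_000, end_learning_rate=0.0)
#   schedule = WarmUp(initial_learning_rate=1e-4, decay_schedule_fn=decay_fn, warmup_steps=1_000)
#   schedule(500)    # during warmup: 500/1_000 * 1e-4 = 5e-5 (power defaults to 1.0)
#   schedule(4_000)  # after warmup: decay_fn(4_000 - 1_000)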
def create_optimizer(init_lr: float , num_train_steps: int , num_warmup_steps: int , min_lr_ratio: float = 0.0 , adam_beta1: float = 0.9 , adam_beta2: float = 0.9_99 , adam_epsilon: float = 1e-8 , adam_clipnorm: Optional[float] = None , adam_global_clipnorm: Optional[float] = None , weight_decay_rate: float = 0.0 , power: float = 1.0 , include_in_weight_decay: Optional[List[str]] = None , ):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
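# Illustrative call (a minimal sketch; the hyperparameter values are made-up and
# `model` is assumed to be an existing tf.keras.Model):
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01)
#   model.compile(optimizer=optimizer)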
class AdamWeightDecay(Adam ):
'''simple docstring'''
    def __init__( self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1E-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs, ):
        '''simple docstring'''
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config( cls, config ):
        '''simple docstring'''
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay, cls ).from_config(config, custom_objects=custom_objects )
    def _prepare_local( self, var_device, var_dtype, apply_state ):
        '''simple docstring'''
        super(AdamWeightDecay, self )._prepare_local(var_device, var_dtype, apply_state )
        apply_state[(var_device, var_dtype)]["""weight_decay_rate"""] = tf.constant(
            self.weight_decay_rate, name="""adam_weight_decay_rate""" )
    def _decay_weights_op( self, var, learning_rate, apply_state ):
        '''simple docstring'''
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""], use_locking=self._use_locking, )
        return tf.no_op()
    def apply_gradients( self, grads_and_vars, name=None, **kwargs ):
        '''simple docstring'''
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay, self ).apply_gradients(zip(grads, tvars ), name=name, **kwargs )
    def _get_lr( self, var_device, var_dtype, apply_state ):
        '''simple docstring'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self, grad, var, apply_state=None ):
        '''simple docstring'''
        lr_t , kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state )
        decay = self._decay_weights_op(var, lr_t, apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay, self )._resource_apply_dense(grad, var, **kwargs )
    def _resource_apply_sparse( self, grad, var, indices, apply_state=None ):
        '''simple docstring'''
        lr_t , kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state )
        decay = self._decay_weights_op(var, lr_t, apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay, self )._resource_apply_sparse(grad, var, indices, **kwargs )
    def get_config( self ):
'''simple docstring'''
_snake_case : List[str] = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
    def _do_use_weight_decay( self, param_name ):
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name ) is not None:
                    return False
        return True
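# Example of the include/exclude logic above (a sketch; the parameter names are
# made-up): with exclude_from_weight_decay=["LayerNorm", "bias"],
# _do_use_weight_decay("encoder/layer_0/kernel") -> True, while
# _do_use_weight_decay("encoder/LayerNorm/gamma") -> False. An include pattern,
# if given, wins over the exclude patterns because it is checked first.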
class GradientAccumulator:
'''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self._gradients = []
        self._accum_steps = None
@property
    def step( self ):
        '''simple docstring'''
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64 ), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()
@property
    def gradients( self ):
'''simple docstring'''
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self, gradients ):
        '''simple docstring'''
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f"Expected {len(self._gradients )} gradients, but got {len(gradients )}" )
        for accum_gradient, gradient in zip(self._gradients, gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        '''simple docstring'''
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
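# Illustrative accumulation loop (a minimal sketch; `optimizer`, `loss_fn`,
# `model` and `micro_batches` are assumed to exist and are not defined here):
#
#   accumulator = GradientAccumulator()
#   for micro_batch in micro_batches:
#       with tf.GradientTape() as tape:
#           loss = loss_fn(model(micro_batch))
#       accumulator(tape.gradient(loss, model.trainable_variables))
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()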
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_forward_signature( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_from_base( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_to_base( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_training( self ):
'''simple docstring'''
pass
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
    def test_initialization( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
    def test_tied_model_weights_key_ignore( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    def test_inference_swin_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
    def test_inference_convnext_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
], )
    def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True ):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared )
        return {"mse": mse}
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
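# Worked example (not from the original problem data): with the key "abc",
# i.e. (97, 98, 99), the plaintext "the" encrypts to [116 ^ 97, 104 ^ 98, 101 ^ 99]
# = [21, 10, 6], and try_key([21, 10, 6], (97, 98, 99)) recovers "the" because
# XOR is its own inverse.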
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        decoded = try_key(ciphertext , key )
        if decoded is not None:
            possibles.append(decoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
A_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
A_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
A_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage="""https://github.com/krishnap25/mauve""", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), codebase_urls=["""https://github.com/krishnap25/mauve"""], reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
], )
    def _compute( self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        '''simple docstring'''
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__( self, image_processor, feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
    @property
    def model_input_names( self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
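# Illustrative usage (a minimal sketch; the checkpoint name, `video_frames` and
# `audio_array` are made-up placeholders, not part of this module):
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=audio_array, sampling_rate=44_100)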
| 28 | 0 |
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    # Weight tying: the output projection shares its matrix with the token embedding.
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str = "." ):
    """simple docstring"""
    # `root` defaults to the working directory so the one-argument call below keeps working.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=10_24 ) as loop:
            while True:
                buffer = source.read(81_92 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location="""cpu""" )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            F" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
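# Example invocation (a sketch; the script filename and output directory are
# assumptions, not taken from this file):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
#
# A bare model name such as "tiny" is resolved through _MODELS and downloaded;
# a path ending in ".pt" is loaded directly with torch.load.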
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class ByTaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer( self ):
        '''simple docstring'''
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
    def get_tokenizer( self, **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
    def get_clean_sequence( self, tokenizer, with_prefix_space=False, max_length=20, min_length=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r"""^[ a-zA-Z]+$""", t[1] ), toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=False ), toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False )
                + """ """
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False )
        return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size], 2 ).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
    def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids )
        result = model(input_ids, langs=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels )
        result = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
    def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids, labels=sequence_labels )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase__ = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["""start_positions"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
                inputs_dict["""end_positions"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_xlm_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ):
        '''simple docstring'''
        self.assertIsInstance(attentions, tuple )
        self.assertListEqual(
            [isinstance(iter_attentions, tuple ) for iter_attentions in attentions], [True] * len(attentions ) )
        self.assertEqual(len(attentions ), (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ):
        '''simple docstring'''
        self.assertIsInstance(hidden_states, tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple ) for iter_hidden_states in hidden_states], [True] * len(hidden_states ), )
        self.assertEqual(len(hidden_states ), (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states ), )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class lowercase( unittest.TestCase ):
@slow
    def test_lm_generate_xlm( self ):
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids )
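# A hedged inference sketch mirroring the integration test above (assuming the
# public "xlm-mlm-en-2048" checkpoint; greedy decoding simply repeats the prompt
# here, as the TODO comment above notes):
#
#   model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
#   input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
#   output_ids = model.generate(input_ids, do_sample=False)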
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( ABC ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_graphormer'''] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
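# Behaviour sketch (illustrative, assuming the standard `transformers` package
# layout is installed): with `_LazyModule`, importing the package is cheap, and
# the heavy torch-backed submodule is only imported on first attribute access.
if __name__ == "__main__":
    import transformers.models.graphormer as graphormer_module  # no model code loaded yet
    config = graphormer_module.GraphormerConfig()  # first access triggers the real submodule import
    print(type(config).__name__)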
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "roformer"
    def __init__( self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
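# Usage sketch (illustrative): the ONNX config above only declares dynamic axes,
# so the exporter inputs can be inspected directly. The `OnnxConfig(config, task=...)`
# constructor signature is assumed from upstream transformers.
if __name__ == "__main__":
    config = RoFormerConfig(vocab_size=1_000 )
    onnx_config = RoFormerOnnxConfig(config, task="""default""" )
    print(onnx_config.inputs )  # input_ids / attention_mask / token_type_ids with batch+sequence axes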
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
    _import_structure['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
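# Worked example (illustrative inputs):
#   shave_segments("input_blocks.3.0.in_layers.0.weight", 2)  -> "0.in_layers.0.weight"
#   shave_segments("input_blocks.3.0.in_layers.0.weight", -1) -> "input_blocks.3.0.in_layers.0"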
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint(checkpoint , config ):
    """simple docstring"""
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
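# Example invocation (hypothetical script name and paths):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm_checkpoints/model.ckpt \
#       --config_file ./ldm_checkpoints/config.json \
#       --dump_path ./converted_unet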
| 28 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A_ = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[int], a_: Tuple, a_: Dict, a_: Any = None, a_: List[Any] = None ):
'''simple docstring'''
_snake_case : List[str] = None
_snake_case : List[str] = os.path.abspath(os.path.join("""examples""", """by_feature""" ) )
_snake_case : Tuple = os.path.abspath("""examples""" )
for item in os.listdir(__lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
_snake_case : Optional[int] = os.path.join(__lowerCamelCase, __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowerCamelCase, feature_script=__lowerCamelCase, tested_section="""main()""" if parser_only else """training_function()""", ):
_snake_case : Optional[int] = compare_against_test(
os.path.join(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
_snake_case : Tuple = '''\n'''.join(__lowerCamelCase )
if special_strings is not None:
for string in special_strings:
_snake_case : int = diff.replace(__lowerCamelCase, """""" )
self.assertEqual(__lowerCamelCase, """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
self.one_complete_example("""complete_nlp_example.py""", __lowerCamelCase )
self.one_complete_example("""complete_nlp_example.py""", __lowerCamelCase )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = os.path.abspath(os.path.join("""examples""", """cv_example.py""" ) )
_snake_case : Tuple = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example("""complete_cv_example.py""", __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
self.one_complete_example("""complete_cv_example.py""", __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class lowercase( TempDirTestCase ):
'''simple docstring'''
lowercase__ = False
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any] ):
'''simple docstring'''
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def UpperCamelCase_ ( cls: List[str] ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[str] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """epoch_0""" ) ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
_snake_case : str = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """step_2""" ) ) )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n ".split()
_snake_case : Optional[int] = run_command(self._launch_args + testargs, return_stdout=__lowerCamelCase )
self.assertNotIn("""epoch 0:""", __lowerCamelCase )
self.assertIn("""epoch 1:""", __lowerCamelCase )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Any = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n ".split()
_snake_case : Any = run_command(self._launch_args + testargs, return_stdout=__lowerCamelCase )
if torch.cuda.is_available():
_snake_case : Union[str, Any] = torch.cuda.device_count()
else:
_snake_case : int = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""", __lowerCamelCase )
self.assertIn("""epoch 1:""", __lowerCamelCase )
else:
self.assertIn("""epoch 0:""", __lowerCamelCase )
self.assertIn("""epoch 1:""", __lowerCamelCase )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ, {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
_snake_case : Union[str, Any] = run_command(self._launch_args + testargs, return_stdout=__lowerCamelCase )
_snake_case : List[Any] = re.findall("""({.+})""", __lowerCamelCase )
_snake_case : Union[str, Any] = [r for r in results if '''accuracy''' in r][-1]
_snake_case : List[str] = ast.literal_eval(__lowerCamelCase )
self.assertGreaterEqual(results["""accuracy"""], 0.75 )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
_snake_case : Any = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase, """tracking""" ) ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Dict = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
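# The tests above drive the example scripts through `accelerate launch`; a hedged
# manual equivalent of the checkpointing test (config file and paths are illustrative):
#   accelerate launch --config_file default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir ./out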
| 701 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (input_list : list ):
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
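# Example behaviour (illustrative):
#   UpperCAmelCase__([2, 3, 4, 5, 3, 4, 2])    -> [2, 3, 4]  (three values tied at count 2)
#   UpperCAmelCase__([2, 3, 4, 5, 3, 4, 2, 4]) -> [4]
#   UpperCAmelCase__([])                       -> []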
| 28 | 0 |
"""simple docstring"""
from torch import nn
def UpperCAmelCase__ (act_fn : str ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
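# Usage sketch (illustrative; reprs may vary across torch versions):
if __name__ == "__main__":
    print(UpperCAmelCase__("""silu""" ) )  # SiLU()
    print(UpperCAmelCase__("""mish""" ) )  # Mish()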
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "bridgetower"
    def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        '''simple docstring'''
        _ = kwargs.pop("""text_config_dict""", None )
        _ = kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def from_text_vision_configs( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
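# Composition sketch (illustrative): build the sub-configs explicitly and combine
# them with the classmethod above; `to_dict()` nests both sub-configs.
if __name__ == "__main__":
    text_config = BridgeTowerTextConfig(vocab_size=50_265 )
    vision_config = BridgeTowerVisionConfig(hidden_size=768 )
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config )
    print(sorted(config.to_dict().keys() )[:5] )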
| 28 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'''
_CITATION = '''\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'''
def intersect_and_union(pred_label , label , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_55
        label = label - 1
        label[label == 2_54] = 2_55
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
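# Worked example (illustrative): with num_labels=2 and ignore_index=255,
#   pred_label = [[0, 1], [1, 1]] and label = [[0, 0], [1, 1]]
# give per-class intersections [1, 2], unions [2, 3], predicted areas [1, 3]
# and label areas [2, 2]; downstream this yields per-class IoU [0.5, 0.667].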
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["""mean_iou"""] = np.nanmean(iou )
    metrics["""mean_accuracy"""] = np.nanmean(acc )
    metrics["""overall_accuracy"""] = all_acc
    metrics["""per_category_iou"""] = iou
    metrics["""per_category_accuracy"""] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ), reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
], )
    def _compute( self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
        '''simple docstring'''
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name , eos_token_id ):
    '''simple docstring'''
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 checkpoint to convert (one of the choices above)''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
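# force deterministic torch/cuDNN kernels so the exact pixel-slice assertions below are reproducible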
enable_full_determinism()
@skip_mps
class lowercase( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = TextToVideoSDPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowercase__ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""), up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""), cross_attention_dim=32, attention_head_dim=4, )
_snake_case : int = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=A_, set_alpha_to_one=A_, )
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=512, )
_snake_case : List[Any] = CLIPTextModel(A_ )
_snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_snake_case : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase_ ( self: Dict, a_: int, a_: List[Any]=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
_snake_case : int = torch.manual_seed(A_ )
else:
_snake_case : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
_snake_case : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Tuple = self.get_dummy_components()
_snake_case : Any = TextToVideoSDPipeline(**A_ )
_snake_case : Optional[int] = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_snake_case : Tuple = self.get_dummy_inputs(A_ )
_snake_case : Any = """np"""
_snake_case : List[Any] = sd_pipe(**A_ ).frames
_snake_case : Union[str, Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_snake_case : Tuple = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_, expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_, expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_snake_case : Dict = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_snake_case : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : str = pipe.to("""cuda""" )
_snake_case : Any = """Spiderman is surfing"""
_snake_case : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_snake_case : Any = pipe(A_, generator=A_, num_inference_steps=25, output_type="""pt""" ).frames
_snake_case : Union[str, Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_snake_case : List[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_snake_case : Any = pipe.to("""cuda""" )
_snake_case : Dict = """Spiderman is surfing"""
_snake_case : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
_snake_case : Any = pipe(A_, generator=A_, num_inference_steps=2, output_type="""pt""" ).frames
_snake_case : Tuple = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
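    # e.g. flatten_yaml_as_dict({"model": {"classification": {"name": "mobilevit_v2"}}})
    # returns {"model.classification.name": "mobilevit_v2"}, which is what the
    # dotted getattr(...) lookups below rely on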
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
    _snake_case : List[Any] = {int(k): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
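# Illustrative trace (assuming base_model=False, i.e. model_prefix == "mobilevitv2."):
# "layer_3.1.local_rep.0.conv.weight"
#   -> "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"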
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
    # remove and rename some keys of the loaded original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
        '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[list] ):
"""simple docstring"""
_snake_case : Optional[int] = current_set.copy()
for row_index, row in enumerate(_lowerCamelCase ):
_snake_case : Any = row[0]
        for column_index, column in enumerate(row ):
if magnitude == 0:
_snake_case : str = column
continue
_snake_case : List[Any] = column / magnitude
# Subtract to cancel term
_snake_case : List[Any] = current_set[0]
_snake_case : int = [first_row]
_snake_case : Optional[int] = current_set[1::]
for row in current_set:
_snake_case : List[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_lowerCamelCase )
continue
for column_index in range(len(_lowerCamelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_lowerCamelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_snake_case : str = final_set[0]
_snake_case : Any = []
_snake_case : int = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_snake_case : Dict = simplify(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , _lowerCamelCase )
_snake_case : Dict = resultant
return final_set
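# Example from the reference implementation's doctest: each row is first
# normalized by its leading term, then subtracted from the first row:
# simplify([[1, 2, 3], [4, 5, 6]]) -> [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]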
def UpperCAmelCase__ (snake_case__ : list[list] ):
"""simple docstring"""
if len(_lowerCamelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_snake_case : Dict = len(_lowerCamelCase ) + 1
if any(len(_lowerCamelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_lowerCamelCase , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_lowerCamelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_snake_case : Any = equations.copy()
if any(0 in row for row in data_set ):
_snake_case : Optional[int] = data_set.copy()
_snake_case : Union[str, Any] = []
for row_index, row in enumerate(_lowerCamelCase ):
if 0 not in row:
_snake_case : List[Any] = data_set.pop(_lowerCamelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , _lowerCamelCase )
_snake_case : int = data_set.copy()
_snake_case : List[str] = simplify(_lowerCamelCase )
_snake_case : List[Any] = simplified[::-1]
_snake_case : list = []
for row in simplified:
_snake_case : int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_snake_case : Optional[Any] = row.copy()[: len(_lowerCamelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_lowerCamelCase ) == 0:
solutions.append(0 )
continue
_snake_case : int = temp_row[1::]
_snake_case : Tuple = temp_row[::-1]
for column_index, column in enumerate(_lowerCamelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_lowerCamelCase )
_snake_case : str = []
for item in solutions:
final.append(float(round(_lowerCamelCase , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
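# Assuming the de-obfuscated reference implementation, the expected output is
# [-1.0, 0.0, 1.0, 2.0, 3.0] (analytically, the coefficient matrix is I + ones,
# so each x_i = b_i - sum(b)/6) and [0.5] (from 4x = 2).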
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
    if isinstance(snake_case__ , collections.abc.Iterable ):
        return snake_case__
    return (snake_case__, snake_case__)
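# De-obfuscated, this is the usual `to_2tuple` helper: a scalar such as 224
# becomes (224, 224), while an iterable like (224, 224) is returned unchanged.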
@require_flax
class lowercase:
'''simple docstring'''
def UpperCamelCase_ ( self: List[str], a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Dict, a_: np.ndarray, a_: np.ndarray, a_: float ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(__UpperCamelCase, __UpperCamelCase, f"Difference between torch and flax is {diff} (>= {tol})." )
def UpperCamelCase_ ( self: Optional[int], a_: Dict, a_: List[str], a_: List[Any], a_: Tuple, a_: Union[str, Any]=None, **a_: Optional[int] ):
'''simple docstring'''
_snake_case : str = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase, __UpperCamelCase )
_snake_case : List[Any] = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_snake_case : List[str] = model(input_ids=__UpperCamelCase, pixel_values=__UpperCamelCase, attention_mask=__UpperCamelCase )
self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase_ ( self: str, a_: List[str], a_: str, a_: Dict, a_: Dict, a_: List[str]=None, **a_: str ):
'''simple docstring'''
_snake_case , _snake_case : str = self.get_vision_text_model(__UpperCamelCase, __UpperCamelCase )
_snake_case : str = {"""vision_model""": vision_model, """text_model""": text_model}
_snake_case : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_snake_case : int = model(input_ids=__UpperCamelCase, pixel_values=__UpperCamelCase, attention_mask=__UpperCamelCase )
self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase_ ( self: str, a_: str, a_: Optional[int], a_: Union[str, Any], a_: Optional[int], a_: List[str]=None, **a_: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.get_vision_text_model(__UpperCamelCase, __UpperCamelCase )
_snake_case : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
_snake_case : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_snake_case : Dict = model(input_ids=__UpperCamelCase, pixel_values=__UpperCamelCase, attention_mask=__UpperCamelCase )
_snake_case : List[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
_snake_case : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
_snake_case : Union[str, Any] = model(input_ids=__UpperCamelCase, pixel_values=__UpperCamelCase, attention_mask=__UpperCamelCase )
_snake_case : Tuple = after_output[0]
        _snake_case : Any = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(__UpperCamelCase, 1E-3 )
def UpperCamelCase_ ( self: Any, a_: List[Any], a_: Tuple, a_: Dict, a_: str, a_: List[str]=None, **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.get_vision_text_model(__UpperCamelCase, __UpperCamelCase )
_snake_case : Any = {"""vision_model""": vision_model, """text_model""": text_model}
_snake_case : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_snake_case : List[Any] = model(
input_ids=__UpperCamelCase, pixel_values=__UpperCamelCase, attention_mask=__UpperCamelCase, output_attentions=__UpperCamelCase )
_snake_case : Tuple = output.vision_model_output.attentions
self.assertEqual(len(__UpperCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Any = to_atuple(vision_model.config.image_size )
_snake_case : Any = to_atuple(vision_model.config.patch_size )
_snake_case : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_snake_case : List[Any] = num_patches + 1
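        # e.g. a 224x224 image with 16x16 patches gives (224 // 16) ** 2 = 196 patches, hence seq_len = 197 with the [CLS] token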
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
_snake_case : Dict = output.text_model_output.attentions
self.assertEqual(len(__UpperCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase_ ( self: int, a_: Dict, a_: Dict, a_: List[str] ):
'''simple docstring'''
pt_model.to(__UpperCamelCase )
pt_model.eval()
# prepare inputs
_snake_case : List[str] = inputs_dict
_snake_case : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_snake_case : Tuple = pt_model(**__UpperCamelCase ).to_tuple()
_snake_case : str = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ), len(__UpperCamelCase ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
_snake_case : int = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase )
_snake_case : List[Any] = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ), len(__UpperCamelCase ), """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
_snake_case : Optional[Any] = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase, from_flax=__UpperCamelCase )
pt_model_loaded.to(__UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
_snake_case : str = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ), len(__UpperCamelCase ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCamelCase, pt_output_loaded.numpy(), 4E-2 )
def UpperCamelCase_ ( self: List[Any], a_: Tuple, a_: Optional[int], a_: Any ):
'''simple docstring'''
_snake_case : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase, __UpperCamelCase )
_snake_case : List[str] = VisionTextDualEncoderModel(__UpperCamelCase )
_snake_case : Any = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_snake_case : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), __UpperCamelCase )
_snake_case : int = fx_state
self.check_pt_flax_equivalence(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
def UpperCamelCase_ ( self: str, a_: List[str], a_: List[str], a_: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase, __UpperCamelCase )
_snake_case : List[Any] = VisionTextDualEncoderModel(__UpperCamelCase )
_snake_case : int = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_snake_case : List[str] = load_flax_weights_in_pytorch_model(__UpperCamelCase, fx_model.params )
self.check_pt_flax_equivalence(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCamelCase )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCamelCase )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCamelCase )
@is_pt_flax_cross_test
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = self.prepare_config_and_inputs()
_snake_case : Dict = config_inputs_dict.pop("""vision_config""" )
_snake_case : Union[str, Any] = config_inputs_dict.pop("""text_config""" )
_snake_case : Optional[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
self.check_equivalence_flax_to_pt(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.get_pretrained_model_and_inputs()
_snake_case : Tuple = model_a(**__UpperCamelCase )
_snake_case : List[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCamelCase )
_snake_case : str = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
_snake_case : str = model_a(**__UpperCamelCase )
_snake_case : int = after_outputs[0]
        _snake_case : List[str] = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(__UpperCamelCase, 1E-5 )
@require_flax
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""", """hf-internal-testing/tiny-bert""", vision_from_pt=__UpperCamelCase, text_from_pt=__UpperCamelCase, )
_snake_case : Union[str, Any] = 13
_snake_case : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_snake_case : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
_snake_case : Union[str, Any] = random_attention_mask([batch_size, 4] )
_snake_case : Any = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def UpperCamelCase_ ( self: List[Any], a_: str, a_: Any ):
'''simple docstring'''
_snake_case : int = FlaxViTModel(__UpperCamelCase )
_snake_case : Tuple = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[Any] = FlaxViTModelTester(self )
_snake_case : Tuple = FlaxBertModelTester(self )
_snake_case : Tuple = vit_model_tester.prepare_config_and_inputs()
_snake_case : List[Any] = bert_model_tester.prepare_config_and_inputs()
_snake_case , _snake_case : Optional[Any] = vision_config_and_inputs
_snake_case , _snake_case , _snake_case , _snake_case : int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""", """hf-internal-testing/tiny-bert""", vision_from_pt=__UpperCamelCase, text_from_pt=__UpperCamelCase, )
_snake_case : str = 13
_snake_case : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_snake_case : Any = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
_snake_case : Any = random_attention_mask([batch_size, 4] )
_snake_case : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def UpperCamelCase_ ( self: List[Any], a_: int, a_: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = FlaxCLIPVisionModel(__UpperCamelCase )
_snake_case : str = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = FlaxCLIPVisionModelTester(self )
_snake_case : Any = FlaxBertModelTester(self )
_snake_case : str = clip_model_tester.prepare_config_and_inputs()
_snake_case : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
_snake_case , _snake_case : Dict = vision_config_and_inputs
_snake_case , _snake_case , _snake_case , _snake_case : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""", logit_scale_init_value=1.0 )
_snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_snake_case : int = processor(
text=["""una foto di un gatto""", """una foto di un cane"""], images=__UpperCamelCase, padding=__UpperCamelCase, return_tensors="""np""" )
_snake_case : List[str] = model(**__UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_snake_case : Any = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image, __UpperCamelCase, atol=1E-3 ) )
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
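# Nothing above imports torch or vision dependencies eagerly; `_LazyModule`
# (assigned at the bottom of this file) resolves these names on first attribute
# access, while the TYPE_CHECKING branch keeps the symbols visible to static
# analyzers.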
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 | 0 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowercase( ChunkPipeline ):
'''simple docstring'''
def __init__( self: str, **a_: Tuple ):
'''simple docstring'''
super().__init__(**a_ )
requires_backends(self, """vision""" )
requires_backends(self, """torch""" )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
self.check_model_type(a_ )
def UpperCamelCase_ ( self: Union[str, Any], **a_: List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
_snake_case : Tuple = {}
_snake_case : Dict = {}
# preprocess args
if "points_per_batch" in kwargs:
_snake_case : str = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_snake_case : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_snake_case : Optional[Any] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_snake_case : Dict = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_snake_case : Dict = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_snake_case : List[Any] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_snake_case : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_snake_case : Optional[int] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_snake_case : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_snake_case : List[Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_snake_case : int = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_snake_case : Tuple = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
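    # The three dicts above are routed by the pipeline base class to
    # `preprocess`, `_forward` and `postprocess` respectively when the pipeline
    # is invoked through `__call__`.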
def __call__( self: Dict, a_: Dict, *a_: Union[str, Any], a_: Tuple=None, a_: List[Any]=None, **a_: Union[str, Any] ):
'''simple docstring'''
return super().__call__(a_, *a_, num_workers=a_, batch_size=a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: Union[str, Any], a_: Dict=64, a_: Union[str, Any] = 0, a_: Tuple = 512 / 1_500, a_: Dict = 32, a_: Union[str, Any] = 1, ):
'''simple docstring'''
_snake_case : str = load_image(a_ )
_snake_case : Optional[int] = self.image_processor.size["""longest_edge"""]
_snake_case : List[str] = self.image_processor.generate_crop_boxes(
a_, a_, a_, a_, a_, a_ )
_snake_case : List[str] = self.image_processor(images=a_, return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_snake_case : str = self.get_inference_context()
with inference_context():
_snake_case : int = self._ensure_tensor_on_device(a_, device=self.device )
_snake_case : List[str] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_snake_case : int = image_embeddings
_snake_case : str = grid_points.shape[1]
_snake_case : Optional[int] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0, a_, a_ ):
_snake_case : Any = grid_points[:, i : i + points_per_batch, :, :]
_snake_case : List[str] = input_labels[:, i : i + points_per_batch]
_snake_case : Any = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase_ ( self: List[Any], a_: List[str], a_: Optional[Any]=0.88, a_: Union[str, Any]=0.95, a_: Tuple=0, a_: Dict=1, ):
'''simple docstring'''
_snake_case : Tuple = model_inputs.pop("""input_boxes""" )
_snake_case : List[str] = model_inputs.pop("""is_last""" )
_snake_case : Union[str, Any] = model_inputs.pop("""original_sizes""" ).tolist()
_snake_case : List[str] = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_snake_case : Any = self.model(**a_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_snake_case : Optional[int] = model_outputs["""pred_masks"""]
_snake_case : int = self.image_processor.post_process_masks(
a_, a_, a_, a_, binarize=a_ )
_snake_case : Dict = model_outputs["""iou_scores"""]
_snake_case : List[str] = self.image_processor.filter_masks(
masks[0], iou_scores[0], original_sizes[0], input_boxes[0], a_, a_, a_, a_, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase_ ( self: Tuple, a_: str, a_: List[str]=False, a_: List[str]=False, a_: Optional[int]=0.7, ):
'''simple docstring'''
_snake_case : int = []
_snake_case : Optional[int] = []
_snake_case : List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_snake_case : Union[str, Any] = torch.cat(a_ )
_snake_case : Any = torch.cat(a_ )
_snake_case : Dict = self.image_processor.post_process_for_mask_generation(
a_, a_, a_, a_ )
_snake_case : Optional[int] = defaultdict(a_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(a_ )
_snake_case : Optional[Any] = {}
if output_rle_mask:
_snake_case : Dict = rle_mask
if output_bboxes_mask:
_snake_case : str = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
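# Hedged sketch of the eager/graph toggle above (assumes TF 2.x, where
# `experimental_compile` was later renamed `jit_compile`):
#
#   def square(x):
#       return x * x                                   # plain call -> eager mode
#   graph_square = tf.function(square, experimental_compile=True)  # XLA graph
#   # both return the same values; only the execution mode differs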
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
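# Hedged usage note: with vocab_size=100, batch_size=2 and sequence_length=8 the
# helper above returns a (2, 8) tensor of token ids drawn uniformly from [0, 100).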
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run an additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=10_24 ):
"""simple docstring"""
_snake_case , _snake_case : List[Any] = [], []
_snake_case : Optional[int] = list(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
_snake_case : List[str] = sorted_examples[0]
def is_too_big(snake_case__ : List[str] ):
return tok(lowerCAmelCase__ , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_snake_case : Dict = new_src + """ """ + src
_snake_case : Optional[int] = new_tgt + """ """ + tgt
if is_too_big(lowerCAmelCase__ ) or is_too_big(lowerCAmelCase__ ): # can't fit, finalize example
finished_src.append(lowerCAmelCase__ )
finished_tgt.append(lowerCAmelCase__ )
_snake_case , _snake_case : int = src, tgt
else: # can fit, keep adding
_snake_case , _snake_case : Any = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowerCAmelCase__ )
finished_tgt.append(lowerCAmelCase__ )
return finished_src, finished_tgt
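# Hedged sketch of the greedy packing above; token counting is whitespace-based
# here purely for illustration (the real helper measures length with `tok`).
def _demo_pack(lines, max_tokens):
    packed, buf = [], ""
    for line in lines:
        cand = (buf + " " + line).strip()
        if len(cand.split()) > max_tokens and buf:
            packed.append(buf)  # buffer is full: flush it as one packed example
            buf = line
        else:
            buf = cand  # still fits: keep accumulating
    if buf:
        packed.append(buf)
    return packed
# _demo_pack(["a b", "c", "d e f"], max_tokens=3) == ["a b c", "d e f"]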
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Path , snake_case__ : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : int = Path(lowerCAmelCase__ )
save_path.mkdir(exist_ok=lowerCAmelCase__ )
for split in ["train"]:
_snake_case , _snake_case : Dict = data_dir / F"{split}.source", data_dir / F"{split}.target"
_snake_case : List[Any] = [x.rstrip() for x in Path(lowerCAmelCase__ ).open().readlines()]
_snake_case : Optional[Any] = [x.rstrip() for x in Path(lowerCAmelCase__ ).open().readlines()]
_snake_case : List[Any] = pack_examples(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
print(F"packed {split} split from {len(lowerCAmelCase__ )} examples -> {len(lowerCAmelCase__ )}." )
Path(save_path / F"{split}.source" ).open("""w""" ).write("""\n""".join(lowerCAmelCase__ ) )
Path(save_path / F"{split}.target" ).open("""w""" ).write("""\n""".join(lowerCAmelCase__ ) )
for split in ["val", "test"]:
_snake_case , _snake_case : str = data_dir / F"{split}.source", data_dir / F"{split}.target"
shutil.copyfile(lowerCAmelCase__ , save_path / F"{split}.source" )
shutil.copyfile(lowerCAmelCase__ , save_path / F"{split}.target" )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=lowerCAmelCase__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=lowerCAmelCase__ , default=1_28 )
parser.add_argument("""--data_dir""" , type=lowerCAmelCase__ )
parser.add_argument("""--save_path""" , type=lowerCAmelCase__ )
_snake_case : List[str] = parser.parse_args()
_snake_case : List[Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(lowerCAmelCase__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 708 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
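# Hedged worked example of the greedy pass above (optimal only for canonical
# coin systems such as Indian currency); `_demo_greedy` is illustrative only.
def _demo_greedy(value, coins):
    picked = []
    for coin in sorted(coins, reverse=True):
        while value >= coin:
            value -= coin
            picked.append(coin)
    return picked
# _demo_greedy(987, [1, 2, 5, 10, 20, 50, 100, 500, 2000])
# == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]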
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) <= 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase( a__ ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_, """embed_dim""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase_, """num_heads""" ) )
class lowercase:
'''simple docstring'''
def __init__( self: str, a_: int, a_: str=13, a_: List[str]=64, a_: Optional[int]=3, a_: Dict=[16, 48, 96], a_: Optional[int]=[1, 3, 6], a_: Dict=[1, 2, 10], a_: int=[7, 3, 3], a_: Dict=[4, 2, 2], a_: List[str]=[2, 1, 1], a_: Optional[int]=[2, 2, 2], a_: Any=[False, False, True], a_: List[str]=[0.0, 0.0, 0.0], a_: Dict=0.02, a_: Tuple=1E-12, a_: Any=True, a_: str=True, a_: Optional[Any]=2, ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : Any = patch_sizes
_snake_case : List[str] = patch_stride
_snake_case : Tuple = patch_padding
_snake_case : Dict = is_training
_snake_case : List[Any] = use_labels
_snake_case : Optional[int] = num_labels
_snake_case : List[Any] = num_channels
_snake_case : Optional[int] = embed_dim
_snake_case : str = num_heads
_snake_case : List[str] = stride_kv
_snake_case : Any = depth
_snake_case : List[Any] = cls_token
_snake_case : Optional[int] = attention_drop_rate
_snake_case : Tuple = initializer_range
_snake_case : Union[str, Any] = layer_norm_eps
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : List[Any] = ids_tensor([self.batch_size], self.num_labels )
_snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self: List[str], a_: Tuple, a_: List[Any], a_: str ):
'''simple docstring'''
_snake_case : Dict = CvtModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : Union[str, Any] = model(lowerCamelCase_ )
_snake_case : Optional[int] = (self.image_size, self.image_size)
_snake_case , _snake_case : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_snake_case : List[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_snake_case : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
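# Hedged worked example of the conv output-size formula above, using the default
# first stage (image 64, patch 7, stride 4, padding 2):
# floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16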
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.num_labels
_snake_case : Union[str, Any] = CvtForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Dict = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = CvtModelTester(self )
_snake_case : List[Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=37 )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return
@unittest.skip(reason="""Cvt does not output attentions""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = model_class(lowerCamelCase_ )
_snake_case : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[int] = [*signature.parameters.keys()]
_snake_case : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
def check_hidden_states_output(a_: int, a_: Union[str, Any], a_: int ):
_snake_case : Optional[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
_snake_case : List[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
_snake_case : Tuple = outputs.hidden_states
_snake_case : int = len(self.model_tester.depth )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Tuple = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Any = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = CvtModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
_snake_case : int = self.default_image_processor
_snake_case : List[str] = prepare_img()
_snake_case : str = image_processor(images=lowerCamelCase_, return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
_snake_case : Tuple = model(**lowerCamelCase_ )
# verify the logits
_snake_case : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
_snake_case : Any = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1E-4 ) )
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
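# Hedged arithmetic check of the comment above: with image_size=30 and
# patch_size=2, (30 // 2) ** 2 = 225 patches, so seq_length = 226 with [CLS].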
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A_ = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''DeiTFeatureExtractor''']
A_ = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
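# Hedged note: in the upstream lazy-import pattern this `_LazyModule` object
# replaces the package in `sys.modules`, deferring the heavy torch/TF submodule
# imports until an attribute such as `DeiTModel` is first accessed.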
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
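# Hedged sketch of the denoising loop above: each step feeds the model's
# predicted residual back through the scheduler.
#
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample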
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowercase__ = StableDiffusionDiffEditPipeline
lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase_, )
_snake_case : str = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=lowerCAmelCase_, set_alpha_to_one=lowerCAmelCase_, )
_snake_case : Union[str, Any] = DDIMInverseScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=lowerCAmelCase_, set_alpha_to_zero=lowerCAmelCase_, )
torch.manual_seed(0 )
_snake_case : List[str] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
_snake_case : int = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=512, )
_snake_case : Any = CLIPTextModel(lowerCAmelCase_ )
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_snake_case : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase_ ( self: int, a_: List[Any], a_: Dict=0 ):
'''simple docstring'''
_snake_case : Union[str, Any] = floats_tensor((1, 16, 16), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_snake_case : List[Any] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_snake_case : int = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case : Any = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case : Optional[int] = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
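# Hedged sketch of the seeding pattern above: non-MPS devices use a device-bound
# torch.Generator, while MPS falls back to the global torch.manual_seed.
#
#   generator = torch.Generator(device="cpu").manual_seed(0)
#   noise = torch.randn(1, 4, 16, 16, generator=generator)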
def UpperCamelCase_ ( self: List[str], a_: List[Any], a_: str=0 ):
'''simple docstring'''
_snake_case : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_snake_case : List[str] = image.cpu().permute(0, 2, 3, 1 )[0]
_snake_case : List[str] = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_snake_case : List[Any] = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case : str = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Optional[Any]=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_snake_case : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
_snake_case : str = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_snake_case : Dict = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case : Optional[int] = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if not hasattr(self.pipeline_class, """_optional_components""" ):
return
_snake_case : Dict = self.get_dummy_components()
_snake_case : Optional[Any] = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_snake_case : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case : Optional[int] = pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
_snake_case : Dict = self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase_, lowerCAmelCase_ ) is None, f"`{optional_component}` did not stay set to None after loading.", )
_snake_case : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case : Any = pipe_loaded(**lowerCAmelCase_ )[0]
_snake_case : int = np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase_, 1E-4 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[Any] = """cpu"""
_snake_case : Tuple = self.get_dummy_components()
_snake_case : int = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case : Optional[Any] = self.get_dummy_mask_inputs(lowerCAmelCase_ )
_snake_case : Union[str, Any] = pipe.generate_mask(**lowerCAmelCase_ )
_snake_case : Any = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
_snake_case : Optional[Any] = np.array([0] * 9 )
_snake_case : str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = """cpu"""
_snake_case : Union[str, Any] = self.get_dummy_components()
_snake_case : Dict = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case : Optional[int] = self.get_dummy_inversion_inputs(lowerCAmelCase_ )
_snake_case : Optional[int] = pipe.invert(**lowerCAmelCase_ ).images
_snake_case : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
_snake_case : Optional[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], )
_snake_case : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_, 1E-3 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = """cpu"""
_snake_case : List[str] = self.get_dummy_components()
_snake_case : Optional[int] = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
_snake_case : int = DPMSolverMultistepScheduler(**lowerCAmelCase_ )
_snake_case : str = DPMSolverMultistepInverseScheduler(**lowerCAmelCase_ )
_snake_case : List[str] = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case : Any = self.get_dummy_inversion_inputs(lowerCAmelCase_ )
_snake_case : List[str] = pipe.invert(**lowerCAmelCase_ ).images
_snake_case : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
_snake_case : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], )
_snake_case : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_, 1E-3 )
@require_torch_gpu
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls: int ):
'''simple docstring'''
_snake_case : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
_snake_case : Dict = raw_image.convert("""RGB""" ).resize((768, 768) )
_snake_case : Any = raw_image
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : str = torch.manual_seed(0 )
_snake_case : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""", safety_checker=lowerCAmelCase_, torch_dtype=torch.floataa )
_snake_case : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
_snake_case : List[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case : Optional[int] = """a bowl of fruit"""
_snake_case : List[str] = """a bowl of pears"""
_snake_case : Optional[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=lowerCAmelCase_, target_prompt=lowerCAmelCase_, generator=lowerCAmelCase_, )
_snake_case : Any = pipe.invert(
prompt=lowerCAmelCase_, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase_ ).latents
_snake_case : List[str] = pipe(
prompt=lowerCAmelCase_, mask_image=lowerCAmelCase_, image_latents=lowerCAmelCase_, generator=lowerCAmelCase_, negative_prompt=lowerCAmelCase_, inpaint_strength=0.7, output_type="""numpy""", ).images[0]
_snake_case : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Any = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""", safety_checker=lowerCAmelCase_, torch_dtype=torch.floataa )
_snake_case : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : Tuple = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case : List[str] = """a bowl of fruit"""
_snake_case : Optional[Any] = """a bowl of pears"""
_snake_case : int = pipe.generate_mask(
image=self.raw_image, source_prompt=lowerCAmelCase_, target_prompt=lowerCAmelCase_, generator=lowerCAmelCase_, )
_snake_case : Optional[Any] = pipe.invert(
prompt=lowerCAmelCase_, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase_, num_inference_steps=25, ).latents
_snake_case : Optional[int] = pipe(
prompt=lowerCAmelCase_, mask_image=lowerCAmelCase_, image_latents=lowerCAmelCase_, generator=lowerCAmelCase_, negative_prompt=lowerCAmelCase_, inpaint_strength=0.7, num_inference_steps=25, output_type="""numpy""", ).images[0]
_snake_case : str = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to test
    # divisors of that form up to sqrt(number)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
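

# Quick sanity checks (illustrative): is_prime(2) and is_prime(3) hit the first branch,
# is_prime(25) is caught by the 6k +/- 1 loop (i = 5 divides 25), and is_prime(29) returns True.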
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
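

# This searches for odd composite numbers that cannot be written as prime + 2 * i**2, i.e.
# counterexamples to Goldbach's other conjecture (Project Euler problem 46); the smallest such
# number, and hence the value of solution(), is known to be 5777.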
def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    '''7B''': 11008,
    '''13B''': 13824,
    '''30B''': 17920,
    '''65B''': 22016,
    '''70B''': 28672,
}
NUM_SHARDS = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
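

# Illustrative check (the 4096 hidden size for 7B is an assumption, not taken from this file):
# with n = 4096, ffn_dim_multiplier = 1 and multiple_of = 256, int(8 * 4096 / 3) = 10922, which
# rounds up to the next multiple of 256 as (10922 + 255) // 256 * 256 = 11008 -- matching
# INTERMEDIATE_SIZE_MAP["7B"] above.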
def read_json(path):
    """simple docstring"""
    with open(path, """r""") as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, """w""") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, """tmp""")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, """params.json"""))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
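
    # Note on the permutation above: the original checkpoints store the rotary query/key weights in
    # an interleaved (pairwise) layout, while the Transformers Llama implementation expects the
    # half-split rotary layout, so each head's rows are regrouped accordingly before saving.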
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_snake_case : Any = torch.load(os.path.join(a_ , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_snake_case : Dict = [
torch.load(os.path.join(a_ , F"consolidated.{i:02d}.pth" ) , map_location="""cpu""" )
for i in range(a_ )
]
_snake_case : Tuple = 0
_snake_case : Any = {'''weight_map''': {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"] ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"] ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim) )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim )
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim )
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["""weight_map"""][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
_snake_case : List[Any] = F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_snake_case : Optional[int] = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
_snake_case : List[Any] = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(a_ )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]["""output.weight"""] for i in range(a_ )] , dim=0 ),
}
for k, v in state_dict.items():
_snake_case : Any = filename
param_count += v.numel()
torch.save(a_ , os.path.join(a_ , a_ ) )
    # Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, """pytorch_model.bin.index.json"""))
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["""n_heads"""], num_hidden_layers=params["""n_layers"""], rms_norm_eps=params["""norm_eps"""], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("""Loading the checkpoint in a Llama model.""")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("""Saving in the Transformers format.""")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--input_dir""", help="""Location of LLaMA weights, which contains tokenizer.model and model folders""", )
    parser.add_argument(
        """--model_size""", choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""], )
    parser.add_argument(
        """--output_dir""", help="""Location to write HF model and tokenizer""", )
    parser.add_argument("""--safe_serialization""", type=bool, help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, """tokenizer.model""")
    write_tokenizer(args.output_dir, spm_path)
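

# Illustrative invocation (the paths are assumptions, not taken from this file):
#   python convert_llama_weights_to_hf.py --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /path/to/hf/llama-7b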
if __name__ == "__main__":
main()
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
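        # The loss below is -log(pos_similarity) + log(neg_similarity): minimizing it raises CLIP
        # similarity to the positive prompts while lowering it for the negatives (when no negative
        # prompts are given, the constant tensor above makes the second term zero).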
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
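    # Example of the accepted prompt formats (illustrative values): the string
    # "a sunny beach:1.0|crowds:0.5" is split on "|" and then on ":" into
    # {"prompts": ["a sunny beach", "crowds"], "weights": tensor([1.0, 0.5])};
    # plain strings default to a weight of 1.0, and (prompt, weight) tuples are also accepted.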
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """simple docstring"""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    """simple docstring"""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
numbers = input('''Enter integers separated by spaces: ''')
number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 713 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")

    count = 0
    while number:
        # This way we arrive at the next set bit (the next 1) instead of looping
        # through each bit and checking for 1s; the loop runs once per set bit
        # rather than once for each of the 32 bit positions
        number &= number - 1
        count += 1
    return count
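

# Worked example (illustrative): 37 is 0b100101, so get_set_bits_count(37) clears one set bit per
# iteration -- 37 & 36 = 0b100100, 36 & 35 = 0b100000, 32 & 31 = 0 -- and returns 3.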
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "BlipImageProcessor"
lowercase__ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self: List[Any], a_: int, a_: Tuple ):
'''simple docstring'''
_snake_case : Tuple = False
super().__init__(__UpperCamelCase, __UpperCamelCase )
_snake_case : str = self.image_processor
def __call__( self: Tuple, a_: ImageInput = None, a_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, a_: bool = True, a_: Union[bool, str, PaddingStrategy] = False, a_: Union[bool, str, TruncationStrategy] = None, a_: Optional[int] = None, a_: int = 0, a_: Optional[int] = None, a_: Optional[bool] = None, a_: bool = False, a_: bool = False, a_: bool = False, a_: bool = False, a_: bool = False, a_: bool = True, a_: Optional[Union[str, TensorType]] = None, **a_: str, ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_snake_case : Dict = self.tokenizer
_snake_case : List[str] = self.tokenizer(
text=__UpperCamelCase, add_special_tokens=__UpperCamelCase, padding=__UpperCamelCase, truncation=__UpperCamelCase, max_length=__UpperCamelCase, stride=__UpperCamelCase, pad_to_multiple_of=__UpperCamelCase, return_attention_mask=__UpperCamelCase, return_overflowing_tokens=__UpperCamelCase, return_special_tokens_mask=__UpperCamelCase, return_offsets_mapping=__UpperCamelCase, return_token_type_ids=__UpperCamelCase, return_length=__UpperCamelCase, verbose=__UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase, )
return text_encoding
# add pixel_values
_snake_case : Dict = self.image_processor(__UpperCamelCase, return_tensors=__UpperCamelCase )
if text is not None:
_snake_case : List[Any] = self.tokenizer(
text=__UpperCamelCase, add_special_tokens=__UpperCamelCase, padding=__UpperCamelCase, truncation=__UpperCamelCase, max_length=__UpperCamelCase, stride=__UpperCamelCase, pad_to_multiple_of=__UpperCamelCase, return_attention_mask=__UpperCamelCase, return_overflowing_tokens=__UpperCamelCase, return_special_tokens_mask=__UpperCamelCase, return_offsets_mapping=__UpperCamelCase, return_token_type_ids=__UpperCamelCase, return_length=__UpperCamelCase, verbose=__UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase, )
else:
_snake_case : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCamelCase )
return encoding_image_processor
def UpperCamelCase_ ( self: int, *a_: List[str], **a_: List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase, **__UpperCamelCase )
def UpperCamelCase_ ( self: Dict, *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase, **__UpperCamelCase )
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = self.tokenizer.model_input_names
_snake_case : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
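

# Minimal usage sketch (names are assumptions for illustration, not taken from this file): the
# processor wraps a Blip image processor and a Bert tokenizer, so a call such as
#   processor = lowercase(image_processor, tokenizer)
#   inputs = processor(images=pil_image, text="""a photo of""", return_tensors="""pt""")
# returns pixel_values together with the usual tokenizer outputs in a single encoding.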
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 0 |
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = '''facebook/wmt19-en-de'''
A_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
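# Shrink every architectural dimension to the minimum so the resulting checkpoint is a toy model
# suitable for fast tests (the vocab sizes come from the full config loaded above).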
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
| 28 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
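# A minimal check (tensor names below are assumed examples, not read from a real
# checkpoint) of how the ignore patterns behave: keys ending in ".*" match by
# prefix, while keys containing ".*." require both the prefix and the suffix.
assert should_ignore("text_encoder_prenet.embed_tokens.weight", IGNORE_KEYS_S2S)
assert not should_ignore("encoder.layers.0.self_attn.k_proj.weight", IGNORE_KEYS_S2S)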
def recursively_load_weights(fairseq_dict, hf_model, task):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
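# Note on the fairseq layout handled above (inferred from the branches): within a
# "conv_layers.<layer_id>.<type_id>" name, type_id 0 addresses the convolution and
# type_id 2 the normalization; group norm only exists on layer 0.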
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
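# Example invocation (hypothetical script name and local paths, for illustration):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./fairseq_checkpoint.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts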
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
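# Standalone inference sketch (checkpoint and processor names are taken from the
# integration tests below; device handling is simplified):
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # (1, num_labels, 512, 512)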
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
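# Migration sketch (checkpoint name is an assumed example): the replacement class
# is a drop-in for the deprecated one.
#
#   processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")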
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
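# Worked example (illustrative): XOR with a key byte is self-inverse, so the same
# operation encrypts and decrypts: ord("e") ^ ord("a") == 101 ^ 97 == 4, and
# 4 ^ 97 == 101 recovers "e".
assert chr((ord("e") ^ ord("a")) ^ ord("a")) == "e"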
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(F"{solution() = }")
| 28 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """simple docstring"""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default=UpperCamelCase_ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowercase__ = field(
default=UpperCamelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase__ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the training data."} )
lowercase__ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the validation data."} )
lowercase__ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowercase__ = field(
default=UpperCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase__ = field(
default=UpperCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase__ = field(
default=UpperCamelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase_ )} , )
lowercase__ = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ = field(
default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowercase__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase__ = field(default=UpperCamelCase_ , metadata={"help": "Name or path of preprocessor config."} )
lowercase__ = field(
default=UpperCamelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase__ = field(
default=UpperCamelCase_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_snake_case : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_snake_case : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_snake_case : Any = {}
if data_args.train_dir is not None:
_snake_case : str = os.path.join(data_args.train_dir , """**""" )
if data_args.validation_dir is not None:
_snake_case : Tuple = os.path.join(data_args.validation_dir , """**""" )
_snake_case : int = load_dataset(
"""imagefolder""" , data_files=snake_case_ , cache_dir=model_args.cache_dir , task="""image-classification""" , )
# If we don't have a validation split, split off a percentage of train as validation.
_snake_case : Tuple = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
_snake_case : Optional[int] = dataset['''train'''].train_test_split(data_args.train_val_split )
_snake_case : List[Any] = split['''train''']
_snake_case : List[Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_snake_case : int = dataset['''train'''].features['''labels'''].names
_snake_case : List[str] = {}, {}
for i, label in enumerate(snake_case_ ):
_snake_case : Optional[Any] = str(snake_case_ )
_snake_case : Optional[int] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
_snake_case : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case_ ) , labelaid=snake_case_ , idalabel=snake_case_ , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : int = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_snake_case : Optional[Any] = image_processor.size['''shortest_edge''']
else:
_snake_case : Optional[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
_snake_case : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_snake_case : Tuple = Compose(
[
RandomResizedCrop(snake_case_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_snake_case : Union[str, Any] = Compose(
[
Resize(snake_case_ ),
CenterCrop(snake_case_ ),
ToTensor(),
normalize,
] )
def train_transforms(snake_case__ : Any ):
_snake_case : Any = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(snake_case__ : int ):
_snake_case : int = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_snake_case : Optional[Any] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_snake_case : List[str] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case_ )
# Initalize our trainer
_snake_case : Optional[Any] = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=snake_case_ , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
_snake_case : List[str] = None
if training_args.resume_from_checkpoint is not None:
_snake_case : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : List[Any] = last_checkpoint
_snake_case : str = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case : Optional[int] = trainer.evaluate()
trainer.log_metrics("""eval""" , snake_case_ )
trainer.save_metrics("""eval""" , snake_case_ )
# Write model card and (optionally) push to hub
_snake_case : List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main()
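# Example invocation (output path is illustrative; "beans" is a small public demo
# dataset):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --remove_unused_columns False \
#       --do_train \
#       --do_eval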
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
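# Usage sketch (inputs are illustrative placeholders):
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=sampling_rate)
#   # `batch` merges the image-processor and feature-extractor outputs.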
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
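# Effect sketch (illustrative): with the lazy module installed in sys.modules, the
# heavy tokenizer imports only run on first attribute access, e.g.
#
#   from transformers.models.nllb import NllbTokenizer  # real import happens here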
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer(self):
        '''simple docstring'''
        return ByTaTokenizer.from_pretrained("google/byt5-small")
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
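# Byte-level sketch (re-derives the ids asserted in the tests above): ByT5 maps each
# UTF-8 byte b to token id b + 3, reserving ids 0/1/2 for pad/eos/unk, then appends
# the eos id (1).
def _byta_byte_ids(text):
    return [b + 3 for b in text.encode("utf-8")] + [1]


assert _byta_byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]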
| 28 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50_400, n_positions=2_048, n_ctx=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
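# Usage sketch (checkpoint name from the archive map above; the export flow follows
# the transformers.onnx API of this era, assumed here):
#
#   config = CodeGenConfig.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=False)
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)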
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
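# Concrete subclass sketch (hypothetical command, for illustration only; in the
# real CLI the `parser` argument is the subparsers action):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from the CLI")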
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "roformer"
    def __init__( self: List[str], vocab_size: Tuple=50_000, embedding_size: Optional[Any]=None, hidden_size: List[str]=768, num_hidden_layers: Union[str, Any]=12, num_attention_heads: Optional[int]=12, intermediate_size: Optional[Any]=3_072, hidden_act: List[str]="gelu", hidden_dropout_prob: List[str]=0.1, attention_probs_dropout_prob: Tuple=0.1, max_position_embeddings: Optional[int]=1_536, type_vocab_size: Any=2, initializer_range: Optional[int]=0.02, layer_norm_eps: Tuple=1E-12, pad_token_id: Dict=0, rotary_value: str=False, use_cache: Dict=True, **kwargs: Dict, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowercase( OnnxConfig ):
'''simple docstring'''
@property
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
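# --- Illustrative sketch (editor's addition): the rotary-embedding idea behind the
# `rotary_value` flag above, in plain torch. Shapes and names are assumptions, not
# RoFormer's actual implementation.
def _apply_rotary(x, sin, cos):
    import torch  # x: (..., seq, dim) with even dim; sin/cos: (seq, dim)
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = torch.stack([-x2, x1], dim=-1).reshape_as(x)
    return x * cos + rotated * sin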
| 28 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def UpperCamelCase_ ( self: Tuple ):
        '''simple docstring'''
        simple_backend = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(simple_backend, """torch""" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(double_backend, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""torch_and_transformers""", objects )
        self.assertIn("""flax_and_transformers""", objects )
        self.assertIn("""torch_and_transformers_and_onnx""", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_class )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file )
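# --- Illustrative sketch (editor's addition): a minimal find_backend in the spirit of
# the function under test; the regex and the joining rule are assumptions.
import re

def _toy_find_backend(line):
    matches = re.findall(r"is_([a-z_]+)_available\(\)", line)
    return "_and_".join(matches) if matches else None

assert _toy_find_backend("    if not is_torch_available():") == "torch"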
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
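# --- Illustrative note (editor's addition): the qkv split arithmetic above with toy
# numbers (C = 8 channels, 2 heads, num_head_channels = 4):
#   fused (24, 8) -> reshape (2, 12, 8) -> split(4, dim=1) -> three (2, 4, 8) chunks
# for query, key and value respectively.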
def convert_ldm_checkpoint(checkpoint , config ):
    """simple docstring"""
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = (i - 1) % (config["""num_res_blocks"""] + 1)
        resnets = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
        if F"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[F"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                F"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[F"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                F"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                """old""": F"input_blocks.{i}.1",
                """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                F"input_blocks.{i}.1.qkv.bias": {
                    """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                F"input_blocks.{i}.1.qkv.weight": {
                    """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        """middle_block.1.qkv.bias""": {
            """key""": """mid_block.attentions.0.key.bias""",
            """query""": """mid_block.attentions.0.query.bias""",
            """value""": """mid_block.attentions.0.value.bias""",
        },
        """middle_block.1.qkv.weight""": {
            """key""": """mid_block.attentions.0.key.weight""",
            """query""": """mid_block.attentions.0.query.weight""",
            """value""": """mid_block.attentions.0.value.weight""",
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = i % (config["""num_res_blocks"""] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split(""".""" )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
                new_checkpoint[F"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    F"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[F"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    F"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    """old""": F"output_blocks.{i}.1",
                    """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    F"output_blocks.{i}.1.qkv.bias": {
                        """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    F"output_blocks.{i}.1.qkv.weight": {
                        """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i] , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = """.""".join(["""output_blocks""", str(i ), path["""old"""]] )
                new_path = """.""".join(["""up_blocks""", str(block_id ), """resnets""", str(layer_in_block_id ), path["""new"""]] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
import numpy as np
def UpperCAmelCase__ (vector : np.array ):
    """simple docstring"""
    return 1 / (1 + np.exp(-vector ))
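# --- Editor's addition (sketch): np.exp can overflow for large negative inputs; a
# numerically stable variant computes each branch only where it is safe.
def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=np.float64)
    positive = vector >= 0
    out[positive] = 1 / (1 + np.exp(-vector[positive]))
    out[~positive] = np.exp(vector[~positive]) / (1 + np.exp(vector[~positive]))
    return out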
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (input_list : list ):
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
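# --- Usage example (editor's addition): modes of a list; ties are returned sorted.
assert UpperCAmelCase__([1, 2, 2, 3, 3] ) == [2, 3]
assert UpperCAmelCase__([] ) == []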
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ):
    """simple docstring"""
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_00  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
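# --- Editor's note (addition): randomized quicksort does O(n log n) comparisons in
# expectation, so z should be on the order of a few hundred for n = 100.
# Quick sanity check that the sort actually ordered M in place:
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))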
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_vision_model"
    def __init__( self: Tuple, hidden_size: str=768, num_hidden_layers: Union[str, Any]=12, num_channels: List[str]=3, patch_size: Optional[int]=16, image_size: List[Any]=288, initializer_factor: Optional[Any]=1, layer_norm_eps: Any=1E-05, stop_gradient: Dict=False, share_layernorm: Any=True, remove_last_layer: int=False, **kwargs: int, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def UpperCamelCase_ ( cls: Union[str, Any], pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs: Optional[Any] ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_text_model"
    def __init__( self: str, vocab_size: Dict=50_265, hidden_size: List[Any]=768, num_hidden_layers: Union[str, Any]=12, num_attention_heads: List[str]=12, initializer_factor: str=1, intermediate_size: Optional[Any]=3_072, hidden_act: int="gelu", hidden_dropout_prob: int=0.1, attention_probs_dropout_prob: int=0.1, max_position_embeddings: Optional[int]=514, type_vocab_size: Tuple=1, layer_norm_eps: Tuple=1E-05, pad_token_id: Optional[int]=1, bos_token_id: Union[str, Any]=0, eos_token_id: str=2, position_embedding_type: Any="absolute", use_cache: List[Any]=True, **kwargs: Union[str, Any], ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def UpperCamelCase_ ( cls: str, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs: int ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower"
    def __init__( self: int, share_cross_modal_transformer_layers: List[str]=True, hidden_act: Any="gelu", hidden_size: List[Any]=768, initializer_factor: int=1, layer_norm_eps: Optional[int]=1E-05, share_link_tower_layers: Tuple=False, link_tower_type: Optional[Any]="add", num_attention_heads: List[str]=12, num_hidden_layers: Union[str, Any]=6, tie_word_embeddings: int=False, init_layernorm_from_vision_encoder: Any=False, text_config: Dict=None, vision_config: Any=None, **kwargs: str, ):
        '''simple docstring'''
        # Pop and discard legacy dict kwargs if present.
        kwargs.pop("""text_config_dict""", None )
        kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def UpperCamelCase_ ( cls: Union[str, Any], text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs: Optional[Any] ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
    def UpperCamelCase_ ( self: int ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
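# --- Illustrative usage (editor's addition): composing the fused config from default
# sub-configs, mirroring what __init__ does with plain dicts:
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   cfg = BridgeTowerConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())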
| 28 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self: Tuple, parent: Optional[Any], batch_size: Optional[Any]=7, num_channels: Any=3, image_size: Tuple=18, min_resolution: int=30, max_resolution: Union[str, Any]=400, do_resize: List[Any]=True, size: int=None, do_center_crop: Optional[Any]=True, crop_size: List[str]=None, do_normalize: Optional[Any]=True, image_mean: Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std: Optional[int]=[0.26_862_954, 0.26_130_258, 0.27_577_711], do_convert_rgb: Union[str, Any]=True, ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 224, """width""": 224}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self: List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self: Union[str, Any], equal_resolution: Any=False, numpify: List[str]=False, torchify: Union[str, Any]=False ):
        '''simple docstring'''
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 )
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
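# --- Illustrative usage (editor's addition; `parent` is only stored, so None is fine):
#   _tester = ChineseCLIPImageProcessingTester(None)
#   _pil = _tester.prepare_inputs(equal_resolution=True)  # list of PIL images
#   _np  = _tester.prepare_inputs(numpify=True)           # list of numpy arrays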
@require_torch
@require_vision
class lowercase( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self: Any ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True )
@property
    def image_processor_dict( self: List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, """do_resize""" ) )
        self.assertTrue(hasattr(image_processing, """size""" ) )
        self.assertTrue(hasattr(image_processing, """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing, """image_mean""" ) )
        self.assertTrue(hasattr(image_processing, """image_std""" ) )
        self.assertTrue(hasattr(image_processing, """do_convert_rgb""" ) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"""height""": 224, """width""": 224} )
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
_snake_case : int = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
_snake_case : Union[str, Any] = image_processing(a_, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
_snake_case : str = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
_snake_case : Optional[int] = image_processing(a_, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : str = self.image_processor_tester.prepare_inputs(equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
_snake_case : Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
_snake_case : str = image_processing(a_, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
@require_torch
@require_vision
class lowercase( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self: str ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self: Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, """do_resize""" ) )
        self.assertTrue(hasattr(image_processing, """size""" ) )
        self.assertTrue(hasattr(image_processing, """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing, """image_mean""" ) )
        self.assertTrue(hasattr(image_processing, """image_std""" ) )
        self.assertTrue(hasattr(image_processing, """do_convert_rgb""" ) )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
_snake_case : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
_snake_case : List[str] = image_processing(a_, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def create_rename_keys(config: Any ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct: Optional[int] , old: List[Any] , new: Tuple ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict: List[str] , config: str ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name: List[Any] , eos_token_id: Union[str, Any] ):
    """simple docstring"""
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
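# --- Illustrative call (editor's addition; the eos id value is an assumption):
#   config, image_size = get_blipa_config("blip2-opt-2.7b-coco", eos_token_id=50118)
#   image_size == 3_64 because the task name contains "coco".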
@torch.no_grad()
def convert_blipa_checkpoint(model_name: int , pytorch_dump_folder_path: int=None , push_to_hub: str=False ):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    lavis_model , model_type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=lavis_model , model_type=model_type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
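# --- Example invocation (editor's addition; the script filename and output path are
# placeholders, not from the original):
#   python convert_blip2.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b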
| 28 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self: List[str] ):
'''simple docstring'''
super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def UpperCamelCase_ ( self: Union[str, Any], **kwargs: Dict ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
    def UpperCamelCase_ ( self: Optional[Any], **kwargs: List[str] ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self: List[str] ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
    def default_tokenizer_fast( self: List[Any] ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case : List[Any] = tokenizer(a_, max_length=len(a_ ), padding=a_, return_tensors="""pt""" )
self.assertIsInstance(a_, a_ )
self.assertEqual((2, 9), batch.input_ids.shape )
self.assertEqual((2, 9), batch.attention_mask.shape )
_snake_case : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(a_, a_ )
# Test that special tokens are reset
@require_torch
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case : Dict = tokenizer(a_, padding=a_, return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""labels""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
@require_torch
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="""max_length""", return_tensors="""pt""" )
            self.assertEqual(32, targets["""input_ids"""].shape[1] )
@require_torch
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1_024, """I am a small frog"""], padding=True, truncation=True, return_tensors="""pt""" )
            self.assertIsInstance(batch, BatchEncoding )
            self.assertEqual(batch.input_ids.shape, (2, 1_024) )
@require_torch
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = ["""A long paragraph for summarization."""]
_snake_case : Tuple = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case : Dict = tokenizer(a_, text_target=a_, return_tensors="""pt""" )
_snake_case : List[str] = inputs["""input_ids"""]
_snake_case : List[str] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ), sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ), sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
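# Hedged sketch (standalone re-implementation, not part of the converter): the nested
# helper above flattens YAML sections into dotted attribute names, which is why the
# resulting namespace is later queried with getattr(config, "model.classification.name").
# The input dict below is hypothetical example data.
def _flatten_demo(d, parent_key="", sep="."):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict):
            items.extend(_flatten_demo(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
assert _flatten_demo({"model": {"classification": {"name": "mobilevit_v2"}}}) == {
    "model.classification.name": "mobilevit_v2"
}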
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
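# Hedged worked example (the key below is hypothetical, not taken from a real
# checkpoint): tracing one key through the substring rules above in the order they are
# applied, assuming base_model=False so the "mobilevitv2." prefix is used.
_demo_key = "layer_3.0.conv.weight"
_demo_key = _demo_key.replace(".conv.", ".convolution.")  # generic conv rename
_demo_key = _demo_key.replace("layer_3.0.", "mobilevitv2.encoder.layer.2.downsampling_layer.")
assert _demo_key == "mobilevitv2.encoder.layer.2.downsampling_layer.convolution.weight"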
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
    # remove and rename some keys of the original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A_ = logging.get_logger('''transformers.models.speecht5''')
A_ = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
A_ = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
A_ = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
A_ = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
A_ = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
A_ = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
A_ = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
A_ = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
A_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
A_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
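# Note (editorial): the three task mappings above share MAPPING_ENCODER / MAPPING_DECODER
# and differ only in which prenet/postnet tables they merge: speech-to-text pairs the
# speech encoder prenet with the text decoder tables, text-to-speech the reverse, and
# speech-to-speech uses the speech prenet/postnet tables on both sides.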
A_ = []
A_ = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
A_ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
A_ = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
A_ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_snake_case : List[Any] = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
_snake_case : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_snake_case : Optional[Any] = value
elif weight_type == "weight_g":
_snake_case : List[str] = value
elif weight_type == "weight_v":
_snake_case : int = value
elif weight_type == "bias":
_snake_case : Tuple = value
elif weight_type == "running_mean":
_snake_case : Optional[Any] = value
elif weight_type == "running_var":
_snake_case : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_snake_case : Optional[Any] = value
else:
_snake_case : int = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_snake_case : int = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
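# Minimal standalone sketch (re-implemented here purely for illustration) of the three
# pattern shapes handled above: trailing ".*" (prefix match), infix ".*." (prefix plus
# suffix match), and plain substring match. The example names are hypothetical.
def _should_ignore_demo(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
assert _should_ignore_demo("text_encoder_prenet.embed_tokens", ["text_encoder_prenet.*"])
assert _should_ignore_demo("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])
assert not _should_ignore_demo("decoder.layers.0.fc1", ["encoder.proj"])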
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : Any ):
"""simple docstring"""
_snake_case : Tuple = []
if task == "s2t":
_snake_case : int = hf_model.speechta.encoder.prenet.feature_encoder
_snake_case : Tuple = MAPPING_S2T
_snake_case : Union[str, Any] = IGNORE_KEYS_S2T
elif task == "t2s":
_snake_case : int = None
_snake_case : Union[str, Any] = MAPPING_T2S
_snake_case : List[str] = IGNORE_KEYS_T2S
elif task == "s2s":
_snake_case : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
_snake_case : Tuple = MAPPING_S2S
_snake_case : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(F"Unsupported task: {task}" )
for name, value in fairseq_dict.items():
if should_ignore(snake_case__ , snake_case__ ):
logger.info(F"{name} was ignored" )
continue
_snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_snake_case : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_snake_case : Union[str, Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
_snake_case : Optional[int] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_snake_case : Union[str, Any] = True
if "*" in mapped_key:
_snake_case : int = name.split(snake_case__ )[0].split(""".""" )[-2]
_snake_case : str = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
_snake_case : str = """weight_g"""
elif "weight_v" in name:
_snake_case : Dict = """weight_v"""
elif "bias" in name:
_snake_case : List[str] = """bias"""
elif "weight" in name:
_snake_case : Optional[int] = """weight"""
elif "running_mean" in name:
_snake_case : int = """running_mean"""
elif "running_var" in name:
_snake_case : int = """running_var"""
elif "num_batches_tracked" in name:
_snake_case : Union[str, Any] = """num_batches_tracked"""
else:
_snake_case : Dict = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"Unused weights: {unused_weights}" )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Dict = full_name.split("""conv_layers.""" )[-1]
_snake_case : Dict = name.split(""".""" )
_snake_case : Union[str, Any] = int(items[0] )
_snake_case : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_snake_case : Optional[int] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_snake_case : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_snake_case : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_snake_case : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : List[Any]=None , snake_case__ : Tuple=None , snake_case__ : Dict=None , ):
"""simple docstring"""
if config_path is not None:
_snake_case : List[Any] = SpeechTaConfig.from_pretrained(snake_case__ )
else:
_snake_case : List[Any] = SpeechTaConfig()
if task == "s2t":
_snake_case : Union[str, Any] = config.max_text_positions
_snake_case : Optional[Any] = SpeechTaForSpeechToText(snake_case__ )
elif task == "t2s":
_snake_case : List[Any] = 18_76
_snake_case : Tuple = 6_00
_snake_case : Union[str, Any] = config.max_speech_positions
_snake_case : Dict = SpeechTaForTextToSpeech(snake_case__ )
elif task == "s2s":
_snake_case : List[str] = 18_76
_snake_case : List[str] = config.max_speech_positions
_snake_case : Dict = SpeechTaForSpeechToSpeech(snake_case__ )
else:
raise ValueError(F"Unknown task name: {task}" )
if vocab_path:
_snake_case : str = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
_snake_case : Tuple = AddedToken("""<mask>""" , lstrip=snake_case__ , rstrip=snake_case__ )
_snake_case : Tuple = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
_snake_case : List[Any] = SpeechTaFeatureExtractor()
_snake_case : Optional[Any] = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(snake_case__ )
_snake_case : Optional[Any] = torch.load(snake_case__ )
recursively_load_weights(fairseq_checkpoint["""model"""] , snake_case__ , snake_case__ )
model.save_pretrained(snake_case__ )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(snake_case__ )
model.push_to_hub(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase__ (snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Any = image.size
_snake_case : Tuple = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_snake_case : int = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
_snake_case : Tuple = np.array(snake_case__ ).astype(np.floataa ) / 2_55.0
_snake_case : str = image[None].transpose(0 , 3 , 1 , 2 )
_snake_case : str = torch.from_numpy(snake_case__ )
return 2.0 * image - 1.0
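# Hedged worked example of the snapping above: x - x % 32 rounds down to the nearest
# multiple of 32, so a hypothetical 127x65 input would be resized to 96x64 before its
# pixel values are mapped from [0, 255] to [-1.0, 1.0] by the final 2.0 * image - 1.0.
assert (127 - 127 % 32, 65 - 65 % 32) == (96, 64)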
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Any, a_: VQModel, a_: UNetaDModel, a_: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
], ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self: Dict, a_: Union[torch.Tensor, PIL.Image.Image] = None, a_: Optional[int] = 1, a_: Optional[int] = 100, a_: Optional[float] = 0.0, a_: Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_: Optional[str] = "pil", a_: bool = True, ):
'''simple docstring'''
if isinstance(a_, PIL.Image.Image ):
_snake_case : List[Any] = 1
elif isinstance(a_, torch.Tensor ):
_snake_case : int = image.shape[0]
else:
raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a_ )}" )
if isinstance(a_, PIL.Image.Image ):
_snake_case : Optional[int] = preprocess(a_ )
_snake_case : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_snake_case : str = (batch_size, self.unet.config.in_channels // 2, height, width)
_snake_case : Any = next(self.unet.parameters() ).dtype
_snake_case : int = randn_tensor(a_, generator=a_, device=self.device, dtype=a_ )
_snake_case : List[str] = image.to(device=self.device, dtype=a_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(a_, device=self.device )
_snake_case : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_snake_case : Optional[int] = {}
if accepts_eta:
_snake_case : Optional[Any] = eta
for t in self.progress_bar(a_ ):
# concat latents and low resolution image in the channel dimension.
_snake_case : List[str] = torch.cat([latents, image], dim=1 )
_snake_case : int = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
_snake_case : Any = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_snake_case : Dict = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VQVAE
_snake_case : Tuple = self.vqvae.decode(a_ ).sample
_snake_case : Dict = torch.clamp(a_, -1.0, 1.0 )
_snake_case : Optional[Any] = image / 2 + 0.5
_snake_case : List[Any] = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
_snake_case : List[str] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
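# Hypothetical usage sketch (the checkpoint id below is an assumption, not taken from
# this file). Given `pipe`, an instance of the pipeline class above loaded with
# from_pretrained("CompVis/ldm-super-resolution-4x-openimages"), a low-resolution PIL
# image can be upscaled via:
#     upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]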
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
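# Note (editorial): with the _LazyModule pattern above, importing this package only
# records the names listed in the import structure; the actual submodules are loaded on
# first attribute access, while the TYPE_CHECKING branch exists solely so static type
# checkers and IDEs can resolve the symbols without triggering the heavy imports.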
| 28 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
A_ = '''docs/source/en/_toctree.yml'''
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : Any = defaultdict(snake_case__ )
for doc in model_doc:
counts[doc["local"]] += 1
_snake_case : Any = [key for key, value in counts.items() if value > 1]
_snake_case : Optional[Any] = []
for duplicate_key in duplicates:
_snake_case : Dict = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
    return sorted(snake_case__ , key=lambda s : s["title"].lower() )
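# Hedged worked example (hypothetical toc entries): duplicates of the same "local" page
# collapse to one entry (allowed only because their titles agree), then the cleaned
# list is sorted case-insensitively by title:
#   [{"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert", "title": "BERT"}]
#   -> [{"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"}]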
def UpperCAmelCase__ (snake_case__ : Optional[int]=False ):
"""simple docstring"""
with open(snake_case__ , encoding="""utf-8""" ) as f:
_snake_case : Union[str, Any] = yaml.safe_load(f.read() )
# Get to the API doc
_snake_case : int = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_snake_case : List[Any] = content[api_idx]["""sections"""]
# Then to the model doc
_snake_case : Optional[int] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_snake_case : List[str] = api_doc[model_idx]["""sections"""]
_snake_case : List[str] = [(idx, section) for idx, section in enumerate(snake_case__ ) if """sections""" in section]
_snake_case : Optional[Any] = False
for idx, modality_doc in modalities_docs:
_snake_case : int = modality_doc["""sections"""]
_snake_case : Tuple = clean_model_doc_toc(snake_case__ )
if old_modality_doc != new_modality_doc:
_snake_case : Optional[Any] = True
if overwrite:
_snake_case : Optional[int] = new_modality_doc
if diff:
if overwrite:
_snake_case : Optional[int] = model_doc
_snake_case : Union[str, Any] = api_doc
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
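# Hedged note: the factory above is intended to be used as a decorator (under its
# original, pre-rename name), e.g. @run_with_tf_optimizations(do_eager_mode, use_xla).
# In eager mode the callable runs unchanged; otherwise it is traced through
# tf.function(experimental_compile=use_xla) so graph execution and XLA can kick in.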
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 additional times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
import operator as op
A_ = '''scaler.pt'''
A_ = '''pytorch_model'''
A_ = '''random_states'''
A_ = '''optimizer'''
A_ = '''scheduler'''
A_ = '''pytorch_model.bin'''
A_ = '''pytorch_model.bin.index.json'''
A_ = '''model.safetensors'''
A_ = '''model.safetensors.index.json'''
A_ = '''1.10.2'''
A_ = '''py38'''
A_ = '''4.17.0'''
A_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
A_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
A_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
A_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
A_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
A_ = '''2.0.1'''
A_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
A_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
A_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A_ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
A_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
A_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 708 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
    # Traverse through all denominations
for denomination in reversed(snake_case__ ):
        # Take as many of this denomination as possible
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
            answer.append(snake_case__ ) # append to the answer list
return answer
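# Worked example (using the default denominations below): for a value of 93 the greedy
# loop yields [50, 20, 20, 2, 1], always taking the largest denomination that still
# fits. This is optimal for canonical coin systems like INR, but greedy change-making
# can be suboptimal for arbitrary sets (e.g. {1, 3, 4} and a value of 6: greedy gives
# 4 + 1 + 1, while 3 + 3 uses fewer coins).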
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: str ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = nn.Linear(3, 4 )
_snake_case : int = nn.BatchNormad(4 )
_snake_case : Tuple = nn.Linear(4, 5 )
def UpperCamelCase_ ( self: Dict, a_: int ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a_ ) ) )
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, model.state_dict() )
_snake_case : str = os.path.join(a_, """index.json""" )
self.assertTrue(os.path.isfile(a_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
_snake_case : int = os.path.join(a_, f"{key}.dat" )
self.assertTrue(os.path.isfile(a_ ) )
        # TODO: add tests checking that the weights are properly loaded
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
_snake_case : Any = torch.randn(2, 3, dtype=a_ )
with TemporaryDirectory() as tmp_dir:
_snake_case : List[Any] = offload_weight(a_, """weight""", a_, {} )
_snake_case : str = os.path.join(a_, """weight.dat""" )
self.assertTrue(os.path.isfile(a_ ) )
self.assertDictEqual(a_, {"""weight""": {"""shape""": [2, 3], """dtype""": str(a_ ).split(""".""" )[1]}} )
_snake_case : List[str] = load_offloaded_weight(a_, index["""weight"""] )
self.assertTrue(torch.equal(a_, a_ ) )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = ModelForTest()
_snake_case : int = model.state_dict()
_snake_case : str = {k: v for k, v in state_dict.items() if """linear2""" not in k}
_snake_case : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
_snake_case : Tuple = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
_snake_case : Dict = {k: v for k, v in state_dict.items() if """weight""" in k}
_snake_case : str = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
_snake_case : Optional[int] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
# Duplicates are removed
_snake_case : Union[str, Any] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
_snake_case : Optional[Any] = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] )
self.assertDictEqual(a_, {"""a.1""": 0, """a.2""": 2} )
_snake_case : List[Any] = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
_snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] )
self.assertDictEqual(a_, {"""a.1.a""": 0, """a.2.a""": 2} )
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
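        # Worked example (added): with the defaults above (image_size=30,
        # patch_size=2) there are (30 // 2) ** 2 == 225 patches, so
        # seq_length == 226 including the [CLS] token.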
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[list[str]] , snake_case__ : int , ):
"""simple docstring"""
_snake_case : List[str] = len(snake_case__ )
    # If row is equal to the size of the board it means there is a queen in
    # each row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible placements
for col in range(snake_case__ ):
        # We apply what we learned previously. First we check that the current
        # column is not already in the board (possible_board), because a
        # repeated column value means a vertical collision. Then we apply the
        # two formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective collision sets
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , snake_case__ , snake_case__ , )
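# Worked example (added for illustration) of the diagonal formulas above:
# queens at (row, col) = (0, 1) and (1, 2) collide on a 45º diagonal because
# row - col is -1 for both, while (0, 1) and (2, 2) clear both diagonals.
assert (0 - 1) == (1 - 2)  # same 45º diagonal -> collision
assert (0 - 1) != (2 - 2) and (0 + 1) != (2 + 2)  # no diagonal collision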
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : list[list[str]] = []
depth_first_search([] , [] , [] , snake_case__ , snake_case__ )
# Print all the boards
for board in boards:
for column in board:
print(snake_case__ )
print("""""" )
print(len(snake_case__ ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[Any] = 0
while number > 0:
_snake_case : Optional[Any] = number % 10
sum_of_digits += last_digit
_snake_case : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def UpperCAmelCase__ (snake_case__ : int = 1_00 ):
"""simple docstring"""
_snake_case : Optional[int] = factorial(snake_case__ )
_snake_case : List[str] = split_and_add(snake_case__ )
return result
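# Worked example (added for illustration): 10! == 3_628_800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so the solution for num == 10 is 27.
assert sum(int(d) for d in str(3_628_800)) == 27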
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
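# Worked example (added for illustration): 9 and 15 are odd composites that do
# satisfy the conjecture, e.g. 9 == 7 + 2 * 1 ** 2 and 15 == 7 + 2 * 2 ** 2,
# so neither would be collected as a counterexample above.
assert 9 == 7 + 2 * 1**2 and 15 == 7 + 2 * 2**2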
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
A_ = 5_00_00
A_ = 50_00
A_ , A_ = os.path.split(__file__)
A_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Dict ):
"""simple docstring"""
for i in range(snake_case__ ):
_snake_case : Dict = dataset[i]
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
for i in range(0 , len(snake_case__ ) , snake_case__ ):
_snake_case : Tuple = dataset[i : i + batch_size]
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
with dataset.formatted_as(type=snake_case__ ):
for i in range(snake_case__ ):
_snake_case : Union[str, Any] = dataset[i]
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Any , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
with dataset.formatted_as(type=snake_case__ ):
for i in range(0 , snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = dataset[i : i + batch_size]
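# A minimal sketch (an assumption for illustration -- the real helper lives in
# the local utils module) of what a timing decorator like get_duration
# typically looks like: run the benchmark once and return elapsed seconds.
import functools
import time
def _example_get_duration(func):  # hypothetical name, not the imported helper
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper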
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Dict = {"""num examples""": SPEED_TEST_N_EXAMPLES}
_snake_case : int = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
_snake_case : Optional[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
_snake_case : Optional[int] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
_snake_case : Optional[int] = generate_example_dataset(
os.path.join(snake_case__ , """dataset.arrow""" ) , snake_case__ , num_examples=snake_case__ , seq_shapes={"""list""": (1_00,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(snake_case__ ) )
_snake_case : List[Any] = func(snake_case__ , **snake_case__ )
print("""shuffling dataset""" )
_snake_case : Tuple = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(snake_case__ ) )
_snake_case : List[Any] = func(
snake_case__ , **snake_case__ )
with open(snake_case__ , """wb""" ) as f:
f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
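        # Standard CLIP image pipeline (noted for clarity): resize, center-crop
        # to 224x224, then normalize with the CLIP channel means/stds above,
        # which is exactly what preprocess_img applies below.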
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
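# Illustration (added): how the prompt parser above (called as
# self.process_prompts) interprets weighted prompt strings. A string such as
# "a smiling face:2|photo" is split on "|", and each "text:weight" pair yields
# a float weight, defaulting to 1.0 when the ":weight" suffix is omitted.
_example_prompts = [p.strip() for p in "a smiling face:2|photo".split("|")]
assert _example_prompts == ["a smiling face:2", "photo"]
assert float("a smiling face:2".split(":", 1)[1]) == 2.0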
| 28 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
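# Usage sketch (added for illustration, using the upstream transformers names
# BridgeTowerTextConfig / BridgeTowerVisionConfig / BridgeTowerConfig that the
# classes in this file correspond to -- treat the exact call as an assumption):
#
#   text_cfg = BridgeTowerTextConfig()        # hidden_size defaults to 768
#   vision_cfg = BridgeTowerVisionConfig()    # hidden_size defaults to 768
#   combined = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   combined.to_dict()["text_config"]["hidden_size"]  # 768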
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
        # This way we jump straight to the next set bit (next 1) instead of
        # looping through each bit and checking for 1s, so the loop doesn't run
        # 32 times; it only runs once per set bit
number &= number - 1
count += 1
return count
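# Worked example (added for illustration): for 13 == 0b1101 the update clears
# one set bit per pass, 13 -> 12 (0b1100) -> 8 (0b1000) -> 0, so the count is 3.
assert UpperCAmelCase__(13) == 3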
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CpmAntTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : List[Any] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_snake_case : Optional[Any] = """今天天气真好!"""
_snake_case : List[Any] = ["""今天""", """天气""", """真""", """好""", """!"""]
_snake_case : Tuple = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
_snake_case : List[Any] = """今天天气真好!"""
_snake_case : Union[str, Any] = [tokenizer.bos_token] + tokens
_snake_case : Dict = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
_snake_case : Dict = tokenizer.decode(a_ )
self.assertEqual(a_, a_ )
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
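        # Worked example (added): with the tester defaults (image_size=32,
        # hidden_sizes[-1]=40) that is (batch_size, 40, 1, 1).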
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = '''▁'''
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = BigBirdTokenizer
lowercase__ = BigBirdTokenizerFast
lowercase__ = True
lowercase__ = True
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : Union[str, Any] = self.tokenizer_class(a_, keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Any = """<s>"""
_snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """[MASK]""" )
self.assertEqual(len(a_ ), 1_004 )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = """I was born in 92000, and this is falsé."""
_snake_case : Tuple = tokenizer.tokenize(a_ )
_snake_case : Tuple = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
_snake_case : List[str] = tokenizer.encode(a_, add_special_tokens=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
_snake_case : Tuple = self.get_rust_tokenizer()
_snake_case : Union[str, Any] = tokenizer.encode(a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : int = BigBirdTokenizer(a_, keep_accents=a_ )
_snake_case : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382], )
_snake_case : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
@cached_property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """Hello World!"""
_snake_case : List[str] = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
_snake_case : Union[str, Any] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_snake_case : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : List[Any] = """ """.join(a_ )
_snake_case : Dict = self.big_tokenizer.encode_plus(a_, return_tensors="""pt""", return_token_type_ids=a_ )
_snake_case : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence], return_tensors="""pt""", return_token_type_ids=a_ )
_snake_case : Any = BigBirdConfig(attention_type="""original_full""" )
_snake_case : str = BigBirdModel(a_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
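            # Note: attention_type="original_full" is used above because BigBird's
            # block-sparse attention needs sequences longer than its block size; for
            # a ten-token input the dense attention path is the appropriate choice.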
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
_snake_case : Optional[int] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
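        # fmt: off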
_snake_case : Any = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="""google/bigbird-roberta-base""", revision="""215c99f1600e06f83acce68422f2035b2b5c3510""", )
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
    if issubclass(path_type , str ):
        _snake_case : Optional[Any] = parquet_path
    elif issubclass(path_type , list ):
        _snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
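# get_writer_batch_size shrinks the Parquet row-group size for media-heavy
# datasets so that a single row group stays cheap to load. A rough sketch of
# that heuristic, covering only the three cases parametrized above (the real
# implementation in datasets.io.parquet also walks nested features):
def _writer_batch_size_sketch(features: Features):
    if any(isinstance(feature, Image) for feature in features.values()):
        return config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
    if any(isinstance(feature, Sequence) and isinstance(feature.feature, Audio) for feature in features.values()):
        return config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS
    return None  # defer to the writer's default batch size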
| 28 | 0 |
"""simple docstring"""
from math import isqrt
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
    return all(snake_case__ % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def UpperCAmelCase__ (snake_case__ : int = 10**6 ):
"""simple docstring"""
_snake_case : Optional[int] = 0
_snake_case : List[Any] = 1
_snake_case : str = 7
while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
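# The loop above exploits the identity (n + 1) ** 3 - n ** 3 = 3 * n ** 2 + 3 * n + 1,
# so the candidates 7, 19, 37, 61, ... are exactly the differences of
# consecutive cubes. A small self-contained sanity check of that identity:
def _cube_difference_check(terms: int = 5) -> bool:
    candidates = []
    candidate, index = 7, 1
    while len(candidates) < terms:
        candidates.append(candidate)
        index += 1
        candidate += 6 * index  # mirrors prime_candidate += 6 * cube_index above
    return candidates == [(n + 1) ** 3 - n ** 3 for n in range(1, terms + 1)]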
if __name__ == "__main__":
print(F'''{solution() = }''')
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
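        # _config_zero_init drives every initializer range to (almost) zero, so each
        # freshly initialized parameter below must come out as exactly 0.0 or 1.0
        # (e.g. norm weights); multiplying by 1e9 before rounding snaps float noise.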
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = LongformerTokenizer
lowercase__ = True
lowercase__ = LongformerTokenizerFast
lowercase__ = True
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_snake_case : List[str] = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : Any = {"""unk_token""": """<unk>"""}
_snake_case : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Union[str, Any], **a_: List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: str, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : int = """lower newer"""
_snake_case : Optional[int] = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : int = """lower newer"""
_snake_case : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_ ) # , add_prefix_space=True)
self.assertListEqual(a_, a_ )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""", add_special_tokens=a_ ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""", add_special_tokens=a_ ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
_snake_case : Union[str, Any] = tokenizer.encode("""sequence builders""", add_special_tokens=a_ )
_snake_case : Tuple = tokenizer.encode("""multi-sequence build""", add_special_tokens=a_ )
_snake_case : Tuple = tokenizer.encode(
"""sequence builders""", add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = tokenizer.encode(
"""sequence builders""", """multi-sequence build""", add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(a_ )
_snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_, a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : Dict = """Encode this sequence."""
_snake_case : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
_snake_case : Union[str, Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_, a_ )
_snake_case : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_, a_ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
_snake_case : Union[str, Any] = tokenizer.encode(a_, add_special_tokens=a_ )
_snake_case : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_, a_ )
# Testing spaces after special tokens
_snake_case : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(a_, lstrip=a_, rstrip=a_ )} ) # mask token has a left space
_snake_case : str = tokenizer.convert_tokens_to_ids(a_ )
_snake_case : int = """Encode <mask> sequence"""
_snake_case : Tuple = """Encode <mask>sequence"""
_snake_case : List[str] = tokenizer.encode(a_ )
_snake_case : Optional[int] = encoded.index(a_ )
_snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_, a_ )
_snake_case : Union[str, Any] = tokenizer.encode(a_ )
_snake_case : Optional[int] = encoded.index(a_ )
_snake_case : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_, a_ )
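        # The two encodings differ because the mask token was added with lstrip=True:
        # it absorbs the space to its left, so the presence or absence of a space
        # after it decides whether the next word gets the byte-level "Ġ" prefix.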
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : str = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : Dict = self.tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : List[str] = """A, <mask> AllenNLP sentence."""
_snake_case : Tuple = tokenizer_r.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ )
_snake_case : Optional[int] = tokenizer_p.encode_plus(a_, add_special_tokens=a_, return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ), sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ), sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ), )
_snake_case : int = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_snake_case : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a_, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""], a_ )
self.assertEqual(post_processor_state["""add_prefix_space"""], a_ )
self.assertEqual(post_processor_state["""trim_offsets"""], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : int = f"{text_of_1_token} {text_of_1_token}"
_snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Dict = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : str = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Dict = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ), len(a_ ) + 1 + len(a_ )), )
_snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Union[str, Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ), len(a_ ) + 1 + len(a_ )), )
_snake_case : Tuple = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Union[str, Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
_snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Any = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )), )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, add_prefix_space=a_, trim_offsets=a_ )
_snake_case : Optional[Any] = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )), )
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
        decoded += chr(decodedchar )
return decoded
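# try_key works because XOR is self-inverse: (c ^ k) ^ k == c, so applying the
# same repeating key to the ciphertext recovers the plaintext. A tiny
# self-contained demonstration of that property:
def _xor_roundtrip_demo(message: str = "hello", key: tuple[int, ...] = (1, 2, 3)) -> bool:
    ciphertext = [ord(char) ^ keychar for char, keychar in zip(message, cycle(key))]
    recovered = "".join(chr(c ^ k) for c, k in zip(ciphertext, cycle(key)))
    return recovered == message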
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        _snake_case : List[Any] = try_key(snake_case__ , key )
        if encoded is not None:
            possibles.append(encoded )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
    _snake_case : str = Path(__file__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
    _snake_case : List[Any] = [int(number ) for number in data.strip().split(""",""" )]
    _snake_case : Optional[Any] = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        _snake_case : Union[str, Any] = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    _snake_case : Optional[int] = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "luke"
def __init__( self: Dict, a_: str=50_267, a_: Tuple=500_000, a_: Tuple=768, a_: Optional[Any]=256, a_: Optional[Any]=12, a_: Tuple=12, a_: Union[str, Any]=3_072, a_: Any="gelu", a_: Optional[int]=0.1, a_: Optional[int]=0.1, a_: Tuple=512, a_: Optional[Any]=2, a_: Union[str, Any]=0.02, a_: Union[str, Any]=1E-12, a_: Any=True, a_: Any=None, a_: Tuple=1, a_: Optional[int]=0, a_: Union[str, Any]=2, **a_: List[Any], ):
'''simple docstring'''
super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ )
_snake_case : Optional[int] = vocab_size
_snake_case : Any = entity_vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Union[str, Any] = entity_emb_size
_snake_case : str = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : str = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : List[Any] = type_vocab_size
_snake_case : List[str] = initializer_range
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Dict = use_entity_aware_attention
_snake_case : List[Any] = classifier_dropout
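# A short usage sketch (assuming the upstream class name LukeConfig; the class
# is renamed in this listing, and the values shown are the defaults above):
#
#     config = LukeConfig(entity_vocab_size=10_000, use_entity_aware_attention=False)
#     assert config.entity_emb_size == 256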
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
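# A hypothetical usage sketch (the checkpoint name and input arrays below are
# assumptions, not part of this file):
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(images=video_frames, audio=audio_array, sampling_rate=44_100)
#     # -> dict combining the image and audio tensors, per model_input_names above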
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
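# _LazyModule defers the heavy torch/tokenizers imports until an attribute is
# first accessed. A minimal standard-library sketch of the same idea (the names
# here are illustrative, not part of transformers):
import importlib
import types

class _LazyAttrModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        # import lazily on first access, then delegate the attribute lookup
        return getattr(importlib.import_module(module_name), attr)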
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
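        # ByT5 ids are just UTF-8 bytes shifted by the three leading special tokens
        # (pad=0, eos=1, unk=2), so plain byte arithmetic reproduces the ids above:
        self.assertEqual([b + 3 for b in """Unicode €.""".encode("""utf-8""" )] + [1], [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] )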
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
        tokenizer_list : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                tokenizer : Optional[Any] = tokenizer_class.from_pretrained(tmp_dir )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
        tokenizers : Optional[Any] = self.get_tokenizers(fast=False, do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                tokens : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                string : List[Any] = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string, str )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
        tokenizers : str = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                attributes_list : Optional[int] = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                token_id_to_test_setters : Any = 0
                token_to_test_setters : Union[str, Any] = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False )
                for attr in attributes_list:
                    setattr(tokenizer, attr + """_id""", None )
                    self.assertEqual(getattr(tokenizer, attr ), None )
                    self.assertEqual(getattr(tokenizer, attr + """_id""" ), None )
                    setattr(tokenizer, attr + """_id""", token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer, attr ), token_to_test_setters )
                    self.assertEqual(getattr(tokenizer, attr + """_id""" ), token_id_to_test_setters )
                setattr(tokenizer, """additional_special_tokens_ids""", [] )
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens""" ), [] )
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids""" ), [] )
                setattr(tokenizer, """additional_special_tokens_ids""", [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens""" ), [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
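The invariant the round-trip tests above assert is simple: whatever save_pretrained writes must be reloadable by from_pretrained so that encodings are unchanged. A self-contained toy sketch of that contract (illustrative only, not the transformers implementation):

import json, os, tempfile

class ToyTokenizer:
    """Minimal stand-in with a json-backed vocab, for illustration only."""

    def __init__(self, vocab):
        self.vocab = vocab

    def encode(self, text):
        # Assign fresh ids to unseen characters, mimicking a growing vocab.
        return [self.vocab.setdefault(ch, len(self.vocab)) for ch in text]

    def save_pretrained(self, path):
        with open(os.path.join(path, "vocab.json"), "w") as f:
            json.dump(self.vocab, f)

    @classmethod
    def from_pretrained(cls, path):
        with open(os.path.join(path, "vocab.json")) as f:
            return cls(json.load(f))

with tempfile.TemporaryDirectory() as d:
    tok = ToyTokenizer({})
    before = tok.encode("abcab")
    tok.save_pretrained(d)
    assert ToyTokenizer.from_pretrained(d).encode("abcab") == before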
| 28 | 0 |
"""simple docstring"""
def solution (limit: int = 1_00_00_00 ):
    """simple docstring"""
    primes : Tuple = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi : Union[str, Any] = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
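As a sanity check, the sieved totient sum above can be cross-checked against a brute-force Euler phi for small limits; the sum of phi(q) for 2 <= q <= 8 is 21, the Project Euler 72 example:

from math import gcd

def phi_naive(n: int) -> int:
    # Count integers in [1, n] coprime to n.
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def solution_naive(limit: int) -> int:
    return sum(phi_naive(q) for q in range(2, limit + 1))

assert solution(8) == solution_naive(8) == 21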
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
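A minimal, hypothetical sketch of this contract (upstream the ABC is BaseTransformersCLICommand; the class, method, and argument names below are illustrative assumptions, since this dump renames them): register_subcommand wires a subparser and binds a factory with set_defaults(func=...), which a dispatcher later invokes as args.func(args).

from argparse import ArgumentParser

class HelloCommand(BaseTransformersCLICommand):  # assumed upstream name of the ABC above
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Register a subparser and bind a factory the dispatcher can call.
        sub = parser.add_parser("hello", help="toy command for illustration")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"hello {self.name}")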
| 28 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main ():
    """simple docstring"""
    parser : Dict = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser : List[Any] = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args : Union[str, Any] = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service : Optional[Any] = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
    def __init__( self: List[str], vocab_size: Tuple=50_000, embedding_size: Optional[Any]=None, hidden_size: List[str]=768, num_hidden_layers: Union[str, Any]=12, num_attention_heads: Optional[int]=12, intermediate_size: Optional[Any]=3_072, hidden_act: List[str]="gelu", hidden_dropout_prob: List[str]=0.1, attention_probs_dropout_prob: Tuple=0.1, max_position_embeddings: Optional[int]=1_536, type_vocab_size: Any=2, initializer_range: Optional[int]=0.02, layer_norm_eps: Tuple=1E-12, pad_token_id: Dict=0, rotary_value: str=False, use_cache: Dict=True, **kwargs: Dict, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
    def inputs( self: Dict ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis : List[str] = {0: """batch""", 1: """sequence"""}
        dynamic_axis : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
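For reference, a hedged standalone sketch of the mapping the inputs property above evaluates to for the default (non multiple-choice) task:

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]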
| 28 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
    def __init__( self: str, vocab_file: List[str]=None, merges_file: str=None, tokenizer_file: Any=None, unk_token: List[Any]="<|endoftext|>", bos_token: int="<|endoftext|>", eos_token: List[Any]="<|endoftext|>", add_prefix_space: int=False, **kwargs: List[Any], ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("""add_bos_token""", False ):
            model_id : Union[str, Any] = kwargs.pop("""name_or_path""", """""" )
            raise ValueError(
                """Currently GPT2's fast tokenizer does NOT support adding a BOS token."""
                """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
                """ so that the fast tokenizer works correctly.""" )
        pre_tok_state : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space ) != add_prefix_space:
            pre_tok_class : str = getattr(pre_tokenizers, pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self: List[Any], *args: Tuple, **kwargs: Optional[int] ):
        '''simple docstring'''
        is_split_into_words : Union[str, Any] = kwargs.get("""is_split_into_words""", False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self: Optional[int], *args: Dict, **kwargs: Optional[int] ):
        '''simple docstring'''
        is_split_into_words : int = kwargs.get("""is_split_into_words""", False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self: Optional[int], save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files : int = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def decode( self: List[str], token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs: Optional[int], ):
        '''simple docstring'''
        decoded_text : str = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text : int = self.truncate(decoded_text, truncate_before_pattern )
        return decoded_text
    def truncate( self: List[Any], completion: List[str], truncate_before_pattern: str ):
        '''simple docstring'''
        def find_re(string: Union[str, Any], pattern: List[Any], start_pos: List[str] ):
            m : str = pattern.search(string, start_pos )
            return m.start() if m else -1
        terminals : Optional[int] = [re.compile(pattern, re.MULTILINE ) for pattern in truncate_before_pattern]
        prints : List[str] = list(re.finditer("""^print""", completion, re.MULTILINE ) )
        if len(prints ) > 1:
            completion : List[str] = completion[: prints[1].start()]
        defs : List[Any] = list(re.finditer("""^def""", completion, re.MULTILINE ) )
        if len(defs ) > 1:
            completion : Optional[int] = completion[: defs[1].start()]
        start_pos : str = 0
        terminals_pos : str = [
            pos for pos in [find_re(completion, terminal, start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
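The truncation rule above can be illustrated standalone: cut a completion at the second top-level print or def statement. A sketch of the same regex logic (not the method itself):

import re

completion = "def f():\n    return 1\nprint(f())\ndef g():\n    return 2\n"
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
    # Keep everything before the second top-level function definition.
    completion = completion[: defs[1].start()]
assert completion == "def f():\n    return 1\nprint(f())\n"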
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments (path: Optional[Any] , n_shave_prefix_segments: Union[str, Any]=1 ):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths (old_list: str , n_shave_prefix_segments: List[Any]=0 ):
    """simple docstring"""
    mapping : Optional[Any] = []
    for old_item in old_list:
        new_item : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item : str = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths (old_list: Dict , n_shave_prefix_segments: Dict=0 ):
    """simple docstring"""
    mapping : Dict = []
    for old_item in old_list:
        new_item : Dict = old_item
        new_item : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item : Optional[Any] = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint (paths: str , checkpoint: Union[str, Any] , old_checkpoint: List[str] , attention_paths_to_split: str=None , additional_replacements: str=None , config: List[str]=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor : Union[str, Any] = old_checkpoint[path]
            channels : Optional[int] = old_tensor.shape[0] // 3
            target_shape : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path : List[Any] = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint (checkpoint: Any , config: List[str] ):
    """simple docstring"""
    new_checkpoint : int = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    # Retrieves the keys for the input blocks only
    num_input_blocks : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    input_blocks : Any = {
        layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    middle_blocks : Optional[int] = {
        layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    output_blocks : List[Any] = {
        layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks )
    }
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
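A tiny check of the path-renaming helpers above on one LDM-style resnet key; with n_shave_prefix_segments=0 the key is kept whole and only the layer names are rewritten:

mapping = renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
assert mapping == [
    {"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}
]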
| 28 | 0 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
A_ = False
try:
A_ = _is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class lowercase:
'''simple docstring'''
    def __init__( self: Dict, prompt: str = None, choices: list = [] ):
        '''simple docstring'''
        self.position : Optional[int] = 0
        self.choices : int = choices
        self.prompt : Optional[int] = prompt
        if sys.platform == "win32":
            self.arrow_char : Any = """*"""
        else:
            self.arrow_char : List[str] = """➔ """
def UpperCamelCase_ ( self: List[str], a_: Optional[int], a_: str = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, a_ )
else:
forceWrite(self.choices[index], a_ )
    def print_choice( self: List[str], index: int ):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} " )
            self.write_choice(index )
else:
forceWrite(f" {self.choices[index]}" )
reset_cursor()
    def move_direction( self: Optional[Any], direction: Direction, num_spaces: int = 1 ):
        '''simple docstring'''
        old_position : Union[str, Any] = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces, direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position, """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position, """DOWN""" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        index : Any = int(chr(self.current_selection ) )
        movement : Optional[Any] = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP, -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement )
else:
return
else:
return
    def run( self: Tuple, default_choice: int = 0 ):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, """\n""" )
            if in_colab:
                forceWrite("""Please input a choice index (starting from 0), and press enter""", """\n""" )
            else:
                forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""", """\n""" )
        self.position : str = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("""\n""" )
        move_cursor(len(self.choices ) - self.position, """UP""" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice : Optional[Any] = int(builtins.input() )
                    except ValueError:
                        choice : List[str] = default_choice
                else:
                    choice : int = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1, """UP""" )
                        clear_line()
                    self.write_choice(choice, """\n""" )
                    return choice
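Hedged usage sketch of the menu above (interactive, so it needs a real terminal; upstream this class is accelerate's BulletMenu, a name this dump does not keep, which is why the example is left commented):

# menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
# choice_index = menu.run(default_choice=0)  # arrows/digits to move, enter to select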
| 701 |
"""simple docstring"""
from typing import Any
def mode (input_list: list ):
    """simple docstring"""
    if not input_list:
        return []
    result : List[Any] = [input_list.count(value ) for value in input_list]
    y : Optional[int] = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
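Quick usage checks for the mode helper above (ties are all returned, sorted):

assert mode([2, 2, 3]) == [2]
assert mode([1, 1, 2, 2]) == [1, 2]
assert mode([]) == []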
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase:
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( *a_: Dict, **a_: Union[str, Any] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@require_torch
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
        image_classifier : Tuple = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", )
        image : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output : Any = image_classifier(image, candidate_labels=["""a""", """b""", """c"""] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output ), [
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
            ], )
        output : str = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
], )
@require_tf
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
        image_classifier : List[str] = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", framework="""tf""" )
        image : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output : str = image_classifier(image, candidate_labels=["""a""", """b""", """c"""] )
        self.assertEqual(
            nested_simplify(output ), [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], )
        output : Any = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
[
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
{"""score""": 0.333, """label""": ANY(a_ )},
],
], )
@slow
@require_torch
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        image_classifier : Dict = pipeline(
            task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", )
        # This is an image of 2 cats with remotes and no planes
        image : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output : Any = image_classifier(image, candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ), [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ], )
        output : str = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5, )
@slow
@require_tf
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
        image_classifier : List[Any] = pipeline(
            task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", framework="""tf""" )
        # This is an image of 2 cats with remotes and no planes
        image : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output : Tuple = image_classifier(image, candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ), [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ], )
        output : Any = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5, )
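Hedged usage sketch of the pipeline under test (it downloads a checkpoint from the Hub, so it is left commented):

# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# scores = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
# # -> list of {"score": ..., "label": ...} dicts, ordered by score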
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
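Hedged composition sketch (upstream these classes are BridgeTowerTextConfig, BridgeTowerVisionConfig and BridgeTowerConfig; this dump renames them, so the example is left commented):

# text_cfg = BridgeTowerTextConfig()
# vision_cfg = BridgeTowerVisionConfig()
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# assert cfg.to_dict()["text_config"]["hidden_size"] == 768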
| 28 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        profit : Optional[Any] = [10, 20, 30, 40, 50, 60]
        weight : List[str] = [2, 4, 6, 8, 10, 12]
        max_weight : Tuple = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight ), 210 )
    def UpperCamelCase_ ( self: List[str] ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""" )
    def UpperCamelCase_ ( self: Optional[int] ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, """Weight can not be negative.""" )
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, """Profit can not be negative.""" )
    def UpperCamelCase_ ( self: List[str] ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""" )
    def UpperCamelCase_ ( self: Optional[int] ):
        '''simple docstring'''
        self.assertRaisesRegex(
            ValueError, """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
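The rule these tests exercise is the greedy fractional-knapsack heuristic: sort items by profit/weight ratio, take whole items while they fit, then a fraction of the next. A minimal sketch under that assumption (the real implementation lives in knapsack.greedy_knapsack):

def calc_profit_sketch(profit, weight, max_weight):
    # Highest profit-per-weight first; sorted() is stable, so ties keep input order.
    ratios = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in ratios:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w  # take a fraction of the last item
            break
    return total

assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210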
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ():
    """simple docstring"""
    url : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image : Union[str, Any] = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys (config: Any ):
    """simple docstring"""
    rename_keys : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key (dct: Optional[int] , old: List[Any] , new: Tuple ):
    """simple docstring"""
    val : Optional[Any] = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias (state_dict: List[str] , config: str ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias : List[str] = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config (model_name: List[Any] , eos_token_id: Union[str, Any] ):
    """simple docstring"""
    image_size : List[Any] = 3_64 if """coco""" in model_name else 2_24
    vision_config : List[str] = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config : int = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
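A toy check of the rename_key helper above on a one-entry state dict:

d = {"ln_vision.weight": 1}
rename_key(d, "ln_vision.weight", "vision_model.post_layernorm.weight")
assert d == {"vision_model.post_layernorm.weight": 1}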
| 28 | 0 |
import datasets
from .evaluate import evaluate
A_ = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
A_ = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
A_ = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
    def _info( self: Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ), codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""], reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""], )
    def _compute( self: Union[str, Any], predictions: Optional[Any], references: int ):
        '''simple docstring'''
        pred_dict : int = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset : Tuple = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score : int = evaluate(dataset=dataset, predictions=pred_dict )
        return score
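Usage sketch mirroring the docstring example above (requires the datasets package and network access, so it is left commented):

# import datasets
# squad_metric = datasets.load_metric("squad")
# predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
# references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
# print(squad_metric.compute(predictions=predictions, references=references))
# # {'exact_match': 100.0, 'f1': 100.0}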
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
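# A minimal sketch (illustrative input, not from the original file) of what the
# flatten_yaml_as_dict helper above is meant to produce:
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
# flattens to
#   {"model.classification.name": "mobilevit_v2"}
# Each flattened key is then set on the Namespace via setattr, which is why the
# code below reads values with getattr(config, "model.classification.name", ...)
# rather than with plain attribute access.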
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
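# An illustrative mapping intended by create_rename_keys with base_model=False
# (not from the original file):
#   "layer_3.1.local_rep.0.conv.weight"
#     -> "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"
# (".conv." is rewritten to ".convolution." first, then the stage/block prefix
# "layer_3.1.local_rep.0." is remapped to the HF module path).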
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
    # remove and rename some keys of the loaded original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["pixel_values"]
def __init__( self: str, a_: bool = True, a_: Dict[str, int] = None, a_: PILImageResampling = PILImageResampling.BICUBIC, a_: bool = True, a_: Dict[str, int] = None, a_: bool = True, a_: Union[int, float] = 1 / 255, a_: bool = True, a_: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, a_: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **a_: str, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
_snake_case : str = get_size_dict(a_, default_to_square=a_ )
_snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_snake_case : List[str] = get_size_dict(a_, param_name="""crop_size""" )
_snake_case : int = do_resize
_snake_case : str = size
_snake_case : List[Any] = resample
_snake_case : List[Any] = do_center_crop
_snake_case : Any = crop_size
_snake_case : Union[str, Any] = do_rescale
_snake_case : str = rescale_factor
_snake_case : Union[str, Any] = do_normalize
_snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_snake_case : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self: Tuple, a_: np.ndarray, a_: Dict[str, int], a_: PILImageResampling = PILImageResampling.BICUBIC, a_: Optional[Union[str, ChannelDimension]] = None, **a_: List[Any], ):
'''simple docstring'''
_snake_case : List[Any] = get_size_dict(a_, default_to_square=a_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_snake_case : List[str] = int((256 / 224) * size["""shortest_edge"""] )
_snake_case : Tuple = get_resize_output_image_size(a_, size=a_, default_to_square=a_ )
_snake_case : Dict = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
a_, size=(size_dict["""height"""], size_dict["""width"""]), resample=a_, data_format=a_, **a_ )
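    # An illustrative note (not from the original file): with the default
    # size={"shortest_edge": 224}, the shortest edge is resized to
    # int((256 / 224) * 224) == 256 while preserving the aspect ratio; the
    # final 224x224 window is produced later by center_crop.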
def UpperCamelCase_ ( self: List[Any], a_: np.ndarray, a_: Dict[str, int], a_: Optional[Union[str, ChannelDimension]] = None, **a_: Any, ):
'''simple docstring'''
_snake_case : Dict = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(a_, size=(size["""height"""], size["""width"""]), data_format=a_, **a_ )
def UpperCamelCase_ ( self: Dict, a_: np.ndarray, a_: Union[int, float], a_: Optional[Union[str, ChannelDimension]] = None, **a_: str, ):
'''simple docstring'''
return rescale(a_, scale=a_, data_format=a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: np.ndarray, a_: Union[float, List[float]], a_: Union[float, List[float]], a_: Optional[Union[str, ChannelDimension]] = None, **a_: Tuple, ):
'''simple docstring'''
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def UpperCamelCase_ ( self: Dict, a_: ImageInput, a_: Optional[bool] = None, a_: Optional[Dict[str, int]] = None, a_: PILImageResampling = None, a_: Optional[bool] = None, a_: Optional[Dict[str, int]] = None, a_: Optional[bool] = None, a_: Optional[float] = None, a_: Optional[bool] = None, a_: Optional[Union[float, Iterable[float]]] = None, a_: Optional[Union[float, Iterable[float]]] = None, a_: Optional[TensorType] = None, a_: ChannelDimension = ChannelDimension.FIRST, **a_: List[str], ):
'''simple docstring'''
_snake_case : Any = do_resize if do_resize is not None else self.do_resize
_snake_case : Union[str, Any] = resample if resample is not None else self.resample
_snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
_snake_case : int = image_std if image_std is not None else self.image_std
_snake_case : Union[str, Any] = size if size is not None else self.size
_snake_case : Tuple = get_size_dict(a_, default_to_square=a_ )
_snake_case : str = crop_size if crop_size is not None else self.crop_size
_snake_case : int = get_size_dict(a_, param_name="""crop_size""" )
_snake_case : Optional[Any] = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_snake_case : Any = [to_numpy_array(a_ ) for image in images]
if do_resize:
_snake_case : List[Any] = [self.resize(a_, a_, a_ ) for image in images]
if do_center_crop:
_snake_case : str = [self.center_crop(a_, a_ ) for image in images]
if do_rescale:
_snake_case : Optional[Any] = [self.rescale(a_, a_ ) for image in images]
if do_normalize:
_snake_case : Optional[int] = [self.normalize(a_, a_, a_ ) for image in images]
_snake_case : Tuple = [to_channel_dimension_format(a_, a_ ) for image in images]
_snake_case : Tuple = {"""pixel_values""": images}
return BatchFeature(data=a_, tensor_type=a_ )
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: List[str], *a_: Optional[Any], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: List[str], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Tuple, **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Any, *a_: Union[str, Any], **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Optional[int], **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: List[str], **a_: int ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: List[Any], *a_: Optional[int], **a_: List[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Optional[int], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Any, *a_: Any, **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: str, **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Any, *a_: Dict, **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[int], *a_: Union[str, Any], **a_: int ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], *a_: List[str], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: str, **a_: int ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: int, *a_: str, **a_: List[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: int, **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: int, **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[Any], *a_: List[str], **a_: Dict ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: List[str], **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Any, *a_: Dict, **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: List[Any], *a_: Any, **a_: Dict ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: List[Any], **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: int, **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: str, *a_: Optional[Any], **a_: List[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: Dict, **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Tuple, **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: List[str], *a_: Union[str, Any], **a_: Any ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: Dict, **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: int, *a_: Dict, **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[Any], *a_: str, **a_: str ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Tuple, **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[int], *a_: Dict, **a_: List[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: List[Any], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Any, *a_: Tuple, **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: int, *a_: Optional[Any], **a_: int ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], *a_: Tuple, **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: int, **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
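# Note (illustrative, not part of the original file): with the _LazyModule
# above, `from transformers.models.owlvit import OwlViTModel` resolves the
# name lazily, so the heavy torch-backed modeling code is only imported on
# first attribute access.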
| 28 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
A_ = ''''''
A_ = ''''''
A_ = ''''''
A_ = ''''''
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : str = tweepy.OAuthHandler(snake_case__ , snake_case__ )
auth.set_access_token(snake_case__ , snake_case__ )
_snake_case : Optional[int] = tweepy.API(snake_case__ )
# initialize a list to hold all the tweepy Tweets
_snake_case : Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=snake_case__ , count=2_00 )
# save most recent tweets
alltweets.extend(snake_case__ )
# save the id of the oldest tweet less one
_snake_case : Optional[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(snake_case__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : List[Any] = api.user_timeline(
screen_name=snake_case__ , count=2_00 , max_id=snake_case__ )
# save most recent tweets
alltweets.extend(snake_case__ )
# update the id of the oldest tweet less one
_snake_case : Optional[int] = alltweets[-1].id - 1
print(F"...{len(snake_case__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : Optional[int] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , """w""" ) as f:
_snake_case : List[str] = csv.writer(snake_case__ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(snake_case__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
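# Pagination sketch (illustrative ids, not from the original file): max_id is
# inclusive in the Twitter API, hence the "- 1" above. E.g. if page 1 returns
# ids 900..701, oldest becomes 700, and the next call with max_id=700 returns
# only strictly older tweets, so no duplicates are fetched.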
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
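# An illustrative use of the factory above (mirroring the decorators applied
# inside the benchmark class below; model and input_ids are placeholders, and
# the argument order is (do_eager_mode, use_xla)):
#
#   @run_with_tf_optimizations(True, False)
#   def forward():
#       return model(input_ids, training=False)
#
# In eager mode the undecorated function body runs as-is; otherwise it is
# compiled through @tf.function(experimental_compile=use_xla).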
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 additional times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
class lowercase:
'''simple docstring'''
def __init__( self: Optional[int], a_: int = 0 ):
'''simple docstring'''
_snake_case : str = key
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
_snake_case : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(a_ ) ^ key ) for ch in content]
def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: int ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
_snake_case : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(a_ ) ^ key ) for ch in content]
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int = 0 ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
_snake_case : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_snake_case : List[str] = """"""
for ch in content:
ans += chr(ord(a_ ) ^ key )
return ans
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int = 0 ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
_snake_case : Any = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_snake_case : str = """"""
for ch in content:
ans += chr(ord(a_ ) ^ key )
return ans
def UpperCamelCase_ ( self: Dict, a_: str, a_: int = 0 ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
try:
with open(a_ ) as fin, open("""encrypt.out""", """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(a_, a_ ) )
except OSError:
return False
return True
def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: int ):
'''simple docstring'''
assert isinstance(a_, a_ ) and isinstance(a_, a_ )
try:
with open(a_ ) as fin, open("""decrypt.out""", """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(a_, a_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 708 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
    # Traverse all denominations, largest first
for denomination in reversed(snake_case__ ):
        # Take as many of this denomination as possible
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
            answer.append(snake_case__ )  # record this coin in the answer list
return answer
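# A worked example of the intended greedy behaviour (illustrative, matching
# the default denominations in the driver code below):
#
#   >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "93")
#   [50, 20, 20, 2, 1]
#
# The greedy pass takes the largest denomination that still fits, which is
# optimal for canonical currency systems such as this one.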
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
    input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
    if int(value) <= 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
self.test()
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = 0
_snake_case : List[Any] = False
while not completed:
if counter == 1:
self.reset()
_snake_case : List[str] = self.advance()
if not self.does_advance(a_ ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
_snake_case : Union[str, Any] = self.update(a_ )
counter += 1
if counter > 10_000:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int], a_: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int], a_: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: List[str], a_: Tuple=False ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Tuple, a_: List[int] ):
'''simple docstring'''
super(a_, self ).__init__()
if not isinstance(a_, a_ ) or len(a_ ) == 0:
raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(a_, a_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
_snake_case : Optional[int] = token_ids
_snake_case : Dict = len(self.token_ids )
_snake_case : Union[str, Any] = -1 # the index of the currently fulfilled step
_snake_case : str = False
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self: List[str], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : List[Any] = False
if self.does_advance(a_ ):
self.fulfilled_idx += 1
_snake_case : List[str] = True
if self.fulfilled_idx == (self.seqlen - 1):
_snake_case : str = True
_snake_case : Optional[int] = completed
else:
# failed to make progress.
_snake_case : Union[str, Any] = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = False
_snake_case : Dict = 0
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self: Union[str, Any], a_: str=False ):
'''simple docstring'''
_snake_case : Dict = PhrasalConstraint(self.token_ids )
if stateful:
_snake_case : List[Any] = self.seqlen
_snake_case : str = self.fulfilled_idx
_snake_case : Union[str, Any] = self.completed
return new_constraint
class lowercase:
'''simple docstring'''
def __init__( self: int, a_: List[List[int]], a_: int=True ):
'''simple docstring'''
_snake_case : List[str] = max([len(a_ ) for one in nested_token_ids] )
_snake_case : int = {}
for token_ids in nested_token_ids:
_snake_case : List[str] = root
for tidx, token_id in enumerate(a_ ):
if token_id not in level:
_snake_case : int = {}
_snake_case : List[Any] = level[token_id]
if no_subsets and self.has_subsets(a_, a_ ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f" {nested_token_ids}." )
_snake_case : int = root
def UpperCamelCase_ ( self: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.trie
for current_token in current_seq:
_snake_case : Any = start[current_token]
_snake_case : Any = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self: List[Any], a_: Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.next_tokens(a_ )
return len(a_ ) == 0
def UpperCamelCase_ ( self: Dict, a_: List[Any] ):
'''simple docstring'''
_snake_case : int = list(root.values() )
if len(a_ ) == 0:
return 1
else:
return sum([self.count_leaves(a_ ) for nn in next_nodes] )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: int ):
'''simple docstring'''
_snake_case : str = self.count_leaves(a_ )
return len(a_ ) != leaf_count
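# An illustrative trie (not from the original file): nested_token_ids
# [[1, 2, 3], [1, 2, 4]] builds {1: {2: {3: {}, 4: {}}}}, so
# next_tokens([1, 2]) returns [3, 4]. A complete-subset input such as
# [[1, 2], [1, 2, 3]] yields one leaf for two sequences and is rejected
# by the has_subsets check.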
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Tuple, a_: List[List[int]] ):
'''simple docstring'''
super(a_, self ).__init__()
if not isinstance(a_, a_ ) or len(a_ ) == 0:
raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(a_, a_ ) for token_ids in nested_token_ids ):
raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(a_, a_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
_snake_case : Optional[Any] = DisjunctiveTrie(a_ )
_snake_case : Optional[int] = nested_token_ids
_snake_case : Tuple = self.trie.max_height
_snake_case : List[str] = []
_snake_case : Union[str, Any] = False
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[Any] = self.trie.next_tokens(self.current_seq )
if len(a_ ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self: Tuple, a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Optional[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self: Any, a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Any = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = False
if self.does_advance(a_ ):
self.current_seq.append(a_ )
_snake_case : Optional[Any] = True
else:
_snake_case : Tuple = True
self.reset()
_snake_case : Any = self.trie.reached_leaf(self.current_seq )
_snake_case : Tuple = completed
return stepped, completed, reset
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = False
_snake_case : Tuple = []
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self: Dict, a_: List[Any]=False ):
'''simple docstring'''
_snake_case : Union[str, Any] = DisjunctiveConstraint(self.token_ids )
if stateful:
_snake_case : Tuple = self.seqlen
_snake_case : List[str] = self.current_seq
_snake_case : List[Any] = self.completed
return new_constraint
class lowercase:
'''simple docstring'''
def __init__( self: Any, a_: List[Constraint] ):
'''simple docstring'''
_snake_case : Any = constraints
# max # of steps required to fulfill a given constraint
_snake_case : Optional[int] = max([c.seqlen for c in constraints] )
_snake_case : Optional[Any] = len(a_ )
_snake_case : List[Any] = False
self.init_state()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = [constraint.copy(stateful=a_ ) for constraint in self.constraints]
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case : str = constraint.advance()
if isinstance(a_, a_ ):
token_list.append(a_ )
elif isinstance(a_, a_ ):
token_list.extend(a_ )
else:
_snake_case : List[str] = self.inprogress_constraint.advance()
if isinstance(a_, a_ ):
token_list.append(a_ )
elif isinstance(a_, a_ ):
token_list.extend(a_ )
if len(a_ ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self: int, a_: Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_snake_case : int = self.add(a_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`." )
_snake_case : List[str] = False, False
if self.completed:
_snake_case : Dict = True
_snake_case : List[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
_snake_case : Optional[int] = self.inprogress_constraint.update(a_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_ ) )
_snake_case : Union[str, Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_snake_case : Dict = None
if len(self.pending_constraints ) == 0:
# we're done!
_snake_case : Union[str, Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any
            # constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a_ ):
_snake_case : Any = pending_constraint.update(a_ )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(a_ )
_snake_case : Optional[Any] = None
if not complete and stepped:
_snake_case : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_snake_case : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_snake_case : List[str] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self: Dict, a_: int=True ):
'''simple docstring'''
        _snake_case : Dict = ConstraintListState(self.constraints )  # we never mutate the self.constraints
        # objects themselves during this process, so the copy starts from their initialization state.
if stateful:
_snake_case : List[Any] = [
constraint.copy(stateful=a_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_snake_case : Dict = self.inprogress_constraint.copy(stateful=a_ )
_snake_case : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
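# --- Illustrative usage sketch (not part of the original file): it assumes the
# upstream names `ConstraintListState` and `PhrasalConstraint` from
# `transformers.generation`; the classes above are renamed in this dump, so this
# shows the intended API rather than quoting it.
from transformers.generation import ConstraintListState, PhrasalConstraint
state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
print(state.advance())  # [5]: the only token that currently makes progress
complete, stepped = state.add(5)  # stepped=True, the constraint is now in progress
complete, stepped = state.add(6)
complete, stepped = state.add(7)  # complete=True once the whole phrase is consumed
print(state.completed)  # True: every constraint in the list is fulfilled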
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=a_,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
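# Quick arithmetic check (not part of the original tests) for the sequence-length
# rule the model tester above relies on: number of patches plus one [CLS] token.
def beit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1
assert beit_seq_length(30, 2) == 226  # the tester's defaults
assert beit_seq_length(224, 16) == 197  # standard BEiT-base input resolution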
| 28 | 0 |
"""simple docstring"""
import pytest
A_ = '''__dummy_dataset1__'''
A_ = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def UpperCAmelCase__ ():
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCAmelCase__ ():
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : List[str] = dataset_loading_script_name
_snake_case : Optional[Any] = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=snake_case__ )
_snake_case : Tuple = script_dir / F"{script_name}.py"
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
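# Hedged usage sketch (not part of the original file): a test consuming the last
# fixture above, assuming it keeps its upstream name `dataset_loading_script_dir`.
# `datasets.load_dataset` does accept a local script directory; the assertion is
# illustrative (running it would download the JSONL files the dummy script lists).
import datasets
def test_load_dummy_dataset(dataset_loading_script_dir):
    ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
    assert set(ds.features) == {"tokens", "ner_tags", "langs", "spans"}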
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
                # set timesteps on the reloaded scheduler before copying residuals
                new_scheduler.set_timesteps(a_ )
                # copy over dummy past residuals (must be after setting timesteps)
                _snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
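# Minimal standalone sketch (not part of the test class) of the save/load round
# trip the tests above exercise, using the public diffusers API.
import tempfile
from diffusers import IPNDMScheduler
scheduler = IPNDMScheduler(num_train_timesteps=1_000)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = IPNDMScheduler.from_pretrained(tmpdir)
assert reloaded.config.num_train_timesteps == 1_000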
| 28 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_tf_ops.py
A_ = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A_ = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Union[str, Any] = SavedModel()
_snake_case : Optional[Any] = []
    with open(os.path.join(REPO_PATH , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
        _snake_case : Any = json.load(f )["""opsets"""]
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
with open(snake_case__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
_snake_case : List[Any] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list for deterministic output
    _snake_case : Tuple = sorted(model_op_names )
_snake_case : str = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:" )
        print(*incompatible_ops , sep="""\n""" )
else:
print(F"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
A_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
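# Example invocation (illustrative; assumes the script lives at its upstream
# location, utils/check_tf_ops.py, and that ./saved_model contains a saved_model.pb):
#
#   python utils/check_tf_ops.py --saved_model_path ./saved_model --opset 12 --strict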
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
    if not isinstance(snake_case__ , int ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
    for num in range(len(odd_composites ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
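# Standalone cross-check (not part of the original script; `is_prime` is the name
# the module-level list comprehension above already uses). 33 satisfies the
# conjecture (33 = 31 + 2 * 1**2), while 5777 -- the puzzle's answer -- does not.
def _satisfies_conjecture(odd_composite: int) -> bool:
    i = 1
    while 2 * i * i < odd_composite:
        if is_prime(odd_composite - 2 * i * i):
            return True
        i += 1
    return False
assert _satisfies_conjecture(33)
assert not _satisfies_conjecture(5777)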
| 28 | 0 |
"""simple docstring"""
class lowercase:
'''simple docstring'''
def __init__( self: Union[str, Any], a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = val
_snake_case : List[Any] = None
_snake_case : Optional[Any] = None
def UpperCamelCase_ ( self: int, a_: List[str] ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
_snake_case : List[Any] = Node(a_ )
else:
self.left.insert(a_ )
elif val > self.val:
if self.right is None:
_snake_case : Any = Node(a_ )
else:
self.right.insert(a_ )
else:
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
if len(snake_case__ ) == 0:
return arr
_snake_case : List[str] = Node(arr[0] )
for i in range(1 , len(snake_case__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_snake_case : List[Any] = []
    inorder(root , res )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
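# Quick property check (not part of the original file; `tree_sort` is the name the
# demo call above uses). The BST insert overwrites equal keys, so duplicates are
# collapsed; distinct values are used here.
import random
sample = random.sample(range(1_000), k=20)
assert tree_sort(sample) == sorted(sample)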
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
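# --- Illustrative prompt format (not part of the original file). The parser above
# splits prompts on `|` and reads an optional `:weight` suffix (default weight 1.0).
# Keyword names below follow the upstream signature and are assumptions, since the
# parameters are renamed in this dump:
#
#   editor.generate(
#       pos_prompts="a smiling face:2.0|wearing sunglasses",
#       neg_prompts="blurry:0.5",
#       image_path="input.jpg",  # hypothetical input image
#   )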
| 28 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: int, a_: Any=None, a_: Optional[int]=None, a_: Dict=None, **a_: Dict ):
'''simple docstring'''
if tokenize_kwargs is None:
_snake_case : Union[str, Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
_snake_case : List[str] = truncation
_snake_case : Dict = tokenize_kwargs
_snake_case : Any = {}
if return_tensors is not None:
_snake_case : Any = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self: Dict, a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.framework
_snake_case : Optional[Any] = self.tokenizer(a_, return_tensors=a_, **a_ )
return model_inputs
def UpperCamelCase_ ( self: Optional[int], a_: str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model(**a_ )
return model_outputs
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: str=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self: Tuple, *a_: Union[str, Any], **a_: Any ):
'''simple docstring'''
return super().__call__(*a_, **a_ )
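# Usage sketch (not part of the original file) via the public pipeline API that
# this class backs; the checkpoint name is illustrative.
from transformers import pipeline
extractor = pipeline("feature-extraction", model="distilbert-base-cased")
features = extractor("This is a test")
# `features` is a nested list of shape [1, sequence_length, hidden_size]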
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
    if not isinstance(number , int ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
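# Worked example (not part of the original): `number &= number - 1` clears the
# lowest set bit on every iteration, so 44 = 0b101100 takes exactly 3 steps:
#   44 & 43 = 0b101000 (40), 40 & 39 = 0b100000 (32), 32 & 31 = 0
assert bin(44).count("1") == 3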
| 28 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=a_,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
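    # Note on the channel checks above (values follow the tester defaults; this
    # is an illustrative summary, not extra test logic): with
    # hidden_sizes=[10, 20, 30, 40] and out_features=["stage2", "stage3",
    # "stage4"], the backbone exposes hidden_sizes[1:] == [20, 30, 40], i.e.
    # one feature map per requested stage; with out_features=None it falls back
    # to the last stage only, hence channels == [40].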
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class lowercase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
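# A minimal sketch of what the lazy `_import_structure` above buys you
# (illustrative, not part of the module): importing the package is cheap, and
# the heavy framework-specific submodules are only imported when an attribute
# is first accessed, e.g.
#
#   from transformers.models.groupvit import GroupViTConfig  # no torch import yet
#   from transformers.models.groupvit import GroupViTModel   # pulls in the torch-backed module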
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"""train""": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset , tmp_path ):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path ):
    """simple docstring"""
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    """feature, expected""" , [
        (Features({"""foo""": Value("""int32""" )} ), None),
        (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def test_get_writer_batch_size(feature , expected ):
    """simple docstring"""
    assert get_writer_batch_size(feature ) == expected
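# A minimal round-trip sketch of the writer/reader pair exercised above
# (illustrative only; the file name `my_table.parquet` is made up):
#
#   from datasets import Dataset
#   from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "my_table.parquet").write()  # returns bytes written
#   reloaded = ParquetDatasetReader("my_table.parquet").read()
#   assert reloaded.column_names == ds.column_names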
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None, )
    def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute( self, predictions, references ):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="""macro""" )
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset, predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_from_base( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_to_base( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_training( self ):
'''simple docstring'''
pass
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
    def test_initialization( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
    def test_tied_model_weights_key_ignore( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def test_inference_swin_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
    def test_inference_convnext_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
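# A minimal post-processing sketch for the logits checked above (illustrative;
# `outputs` is an UperNet forward result with logits of shape
# (batch, num_labels, height, width)):
#
#   seg_map = outputs.logits.argmax(dim=1)[0]   # (height, width) tensor of label ids
#   num_classes = outputs.logits.shape[1]       # one channel per semantic class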
"""simple docstring"""
def get_set_bits_count(number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
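# Worked example of the Brian Kernighan trick used above (illustrative):
#   13 (0b1101) & 12 (0b1100) -> 12, count = 1
#   12 (0b1100) & 11 (0b1011) ->  8, count = 2
#    8 (0b1000) &  7 (0b0111) ->  0, count = 3
# Each `number &= number - 1` clears exactly the lowest set bit, so the loop
# runs once per set bit instead of once per bit position.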
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
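# Worked XOR example for try_key (illustrative): with key byte ord("g") == 103
# and cipher byte 14, 14 ^ 103 == 105 == ord("i"), so the pair decodes to "i".
# XOR decryption is its own inverse, since (p ^ k) ^ k == p.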
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt" ) -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="""utf-8""" )
    ciphertext = [int(number ) for number in data.strip().split(""",""" )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = StableDiffusionXLImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="""text_time""", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00_085, beta_end=0.012, steps_offset=1, beta_schedule="""scaled_linear""", timestep_spacing="""leading""", )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self, device, generator_device="""cpu""", dtype=torch.float32, seed=0 ):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device, dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_default( self ):
        '''simple docstring'''
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
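# Note on determinism in the slow test above (descriptive only): the latents
# are pre-computed from a seeded NumPy RNG and handed to the pipeline via the
# `latents` input, and the torch Generator is seeded on a fixed device, so the
# 3-step run reproduces the same image slice that the hard-coded
# `expected_slice` values were captured from.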
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( ProcessorMixin ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
    def __init__( self, image_processor, feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names( self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
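# Minimal usage sketch for the processor class above (illustrative; the input
# shapes and sampling rate are made up, and only the audio branch is shown):
#
#   import numpy as np
#   from transformers import TvltFeatureExtractor, TvltImageProcessor
#
#   processor = lowercase(TvltImageProcessor(), TvltFeatureExtractor() )
#   audio = [np.zeros(10_000, dtype=np.float32 )]       # one mono clip
#   outputs = processor(audio=audio, sampling_rate=44_100 )
#   # only audio keys are present, since no `images` were passed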
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
    def get_tokenizer( self, **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
    def get_clean_sequence( self, tokenizer, with_prefix_space=False, max_length=20, min_length=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r"""^[ a-zA-Z]+$""", t[1] ), toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=False ), toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False )
                + """ """
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False )
        return output_txt, output_ids
    def test_eos_treatment( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
        batch_without_eos_added = tokenizer(["""hi""", """I went to the gym""", """"""] )
        self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
    def test_multibytes_char( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = """Unicode €."""
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["""input_ids"""], encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, """Unicode €.</s>""" )
        encoded = tokenizer("""e è é ê ë""" )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["""input_ids"""], encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, """e è é ê ë</s>""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
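    # The ids above follow ByT5's byte-level scheme (illustrative check): each
    # UTF-8 byte b maps to id b + 3, reserving 0/1/2 for pad/</s>/<unk>, e.g.
    # ord("U") == 85 -> 88 and ord("e") == 101 -> 104, while "€" contributes its
    # three UTF-8 bytes 226/130/172 -> 229/133/175; the trailing 1 is the </s>
    # token appended by the tokenizer.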
    def test_prepare_batch_integration( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        self.assertIsInstance(batch, BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens, result )
        self.assertEqual((2, 37), batch.input_ids.shape )
        self.assertEqual((2, 37), batch.attention_mask.shape )
    def test_empty_target_text( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""", batch )
        self.assertIn("""attention_mask""", batch )
        self.assertNotIn("""decoder_input_ids""", batch )
        self.assertNotIn("""decoder_attention_mask""", batch )
    def test_max_length_integration( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="""max_length""", truncation=True, return_tensors=FRAMEWORK )
        self.assertEqual(32, targets["""input_ids"""].shape[1] )
    def test_eos_in_input( self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["""A long paragraph for summarization. </s>"""]
        tgt_text = ["""Summary of the text. </s>"""]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text )
        self.assertEqual(expected_src_tokens, batch["""input_ids"""][0] )
        self.assertEqual(expected_tgt_tokens, batch["""labels"""][0] )
    def test_save_and_load_tokenizer( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length, 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = """ He is very happy, UNwant\u00E9d,running"""
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False )
                self.assertListEqual(before_tokens, after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False )
                self.assertListEqual(before_tokens, after_tokens )
                self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length, 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length, 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125 )]
                special_tokens_map["""additional_special_tokens"""] = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                tokenizer_config["""additional_special_tokens"""] = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(tmp_dir, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
                    json.dump(special_tokens_map, outfile )
                with open(os.path.join(tmp_dir, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
                    json.dump(tokenizer_config, outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
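                # Note: id 255 maps back to raw byte 0xFC (255 - 3), which is not valid
                # UTF-8 on its own, so decoding presumably drops it and yields "".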
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
        _snake_case : Union[str, Any] = [0.48145466, 0.4578275, 0.40821073]
        _snake_case : Optional[int] = [0.26862954, 0.26130258, 0.27577711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
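    # Note: unlike the standard CLIP feature extractor, the image branch above is
    # built from differentiable torchvision transforms, so gradients can flow
    # from the CLIP loss back into the generated pixels during optimization.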
class lowercase( nn.Module ):
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
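    # The loss above is -log(pos_similarity) + log(neg_similarity): minimizing it
    # pushes the image toward the positive prompts and away from the negative ones
    # (neg defaults to a constant 1, i.e. a zero log-term, when no negatives are given).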
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
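    # Accepted prompt formats (inferred from the parsing above): a "|"-separated
    # string such as "a happy face:1.0|wearing glasses:0.5", a list of strings,
    # (prompt, weight) tuples, or plain prompts, which default to weight 1.0.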
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
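# A minimal subclass sketch (illustrative only; in the un-obfuscated transformers
# source this base class is BaseTransformersCLICommand and the two abstract
# methods are register_subcommand(parser) and run()):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           parser.add_parser("hello").set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")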
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_longformer_fast'''] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longformer'''] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_longformer'''] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
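# The _import_structure dict filled in above drives the lazy-import pattern: at
# runtime the module is swapped for a _LazyModule (see the bottom of the file),
# so torch and TensorFlow are only imported on first attribute access, while the
# TYPE_CHECKING branch below gives static type checkers the real imports.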
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
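# Note: axes named "batch"/"sequence" ("choice" for multiple-choice) are exported
# as dynamic ONNX axes, so the traced model accepts variable batch sizes and
# sequence lengths.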
| 28 | 0 |
"""simple docstring"""
def solution(num: int = 1_000 ):
    """simple docstring"""
    return sum(e for e in range(3 , num ) if e % 3 == 0 or e % 5 == 0 )
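# Quick check: solution(10) == 23, i.e. 3 + 5 + 6 + 9 (Project Euler problem 1).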
if __name__ == "__main__":
print(F'''{solution() = }''')
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
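# Note on the qkv handling above: the fused qkv tensor is reshaped per attention
# head and split into equal query/key/value chunks along dim=1 before being
# written out under separate per-projection checkpoint keys.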
def convert_ldm_checkpoint(checkpoint , config ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[Any], a_: str, a_: bool, a_: str = None, a_: list = None ):
'''simple docstring'''
_snake_case : List[Any] = None
_snake_case : Union[str, Any] = os.path.abspath(os.path.join("""examples""", """by_feature""" ) )
_snake_case : List[Any] = os.path.abspath("""examples""" )
for item in os.listdir(a_ ):
if item not in EXCLUDE_EXAMPLES:
_snake_case : List[Any] = os.path.join(a_, a_ )
if os.path.isfile(a_ ) and ".py" in item_path:
with self.subTest(
tested_script=a_, feature_script=a_, tested_section="""main()""" if parser_only else """training_function()""", ):
_snake_case : Dict = compare_against_test(
os.path.join(a_, a_ ), a_, a_, a_ )
_snake_case : Any = """\n""".join(a_ )
if special_strings is not None:
for string in special_strings:
_snake_case : int = diff.replace(a_, """""" )
self.assertEqual(a_, """""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
self.one_complete_example("""complete_nlp_example.py""", a_ )
self.one_complete_example("""complete_nlp_example.py""", a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = os.path.abspath(os.path.join("""examples""", """cv_example.py""" ) )
_snake_case : Optional[Any] = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""", a_, a_, a_ )
self.one_complete_example("""complete_cv_example.py""", a_, a_, a_ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = False
@classmethod
def UpperCamelCase_ ( cls: Any ):
'''simple docstring'''
super().setUpClass()
_snake_case : int = tempfile.mkdtemp()
_snake_case : Any = os.path.join(cls._tmpdir, """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
_snake_case : Any = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def UpperCamelCase_ ( cls: Dict ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """epoch_0""" ) ) )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
_snake_case : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """step_2""" ) ) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n ".split()
_snake_case : Optional[Any] = run_command(self._launch_args + testargs, return_stdout=a_ )
self.assertNotIn("""epoch 0:""", a_ )
self.assertIn("""epoch 1:""", a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n ".split()
_snake_case : Dict = run_command(self._launch_args + testargs, return_stdout=a_ )
if torch.cuda.is_available():
_snake_case : Dict = torch.cuda.device_count()
else:
_snake_case : Optional[Any] = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""", a_ )
self.assertIn("""epoch 1:""", a_ )
else:
self.assertIn("""epoch 0:""", a_ )
self.assertIn("""epoch 1:""", a_ )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ, {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
_snake_case : Optional[int] = run_command(self._launch_args + testargs, return_stdout=a_ )
_snake_case : List[Any] = re.findall("""({.+})""", a_ )
_snake_case : List[Any] = [r for r in results if """accuracy""" in r][-1]
_snake_case : List[Any] = ast.literal_eval(a_ )
self.assertGreaterEqual(results["""accuracy"""], 0.75 )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
_snake_case : Dict = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(a_, """tracking""" ) ) )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 701 |
"""simple docstring"""
from typing import Any
def mode(input_list: list ) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
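# Quick checks: mode([2, 2, 3]) == [2]; mode([1, 1, 2, 2]) == [1, 2] (all tied
# values are returned, sorted); mode([]) == [].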
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float , current: float , resistance: float ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
| 28 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa: tuple[float, float] , pb: tuple[float, float] ) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa: tuple[float, float] , vertexb: tuple[float, float] , vertexc: tuple[float, float] , depth: int , ) -> None:
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexb , vertexa ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexa ) , get_mid(vertexc , vertexb ) , depth - 1 )
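# Note: each call draws one triangle and then recurses three times, so a run at
# depth d draws (3 ** (d + 1) - 1) / 2 triangles in total.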
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
"""simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
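# The (old, new) pairs above cover the vision encoder and the QFormer layernorm;
# the remaining QFormer and language-model keys are renamed further below with
# plain string substitutions instead.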
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
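# Note: the original BLIP-2 ViT stores only q and v biases; the k bias is
# implicitly zero, which is why a zeros tensor is concatenated in the middle.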
def get_blipa_config(model_name , eos_token_id=None ):
    """simple docstring"""
    image_size = 364 if """coco""" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
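# Note: the COCO-finetuned BLIP-2 checkpoints use 364x364 inputs, while the
# pretrained ones use 224x224, hence the image_size switch above.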
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 model to convert.''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ReformerTokenizer
lowercase__ = ReformerTokenizerFast
lowercase__ = True
lowercase__ = False
lowercase__ = True
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : Any = ReformerTokenizer(a_, keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = """<s>"""
_snake_case : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """j""" )
self.assertEqual(len(a_ ), 1_000 )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_rust_tokenizer()
_snake_case : Tuple = """I was born in 92000, and this is falsé."""
_snake_case : List[Any] = tokenizer.tokenize(a_ )
_snake_case : Optional[int] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
_snake_case : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_ )
_snake_case : Dict = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
_snake_case : Tuple = self.get_rust_tokenizer()
_snake_case : List[str] = tokenizer.encode(a_ )
_snake_case : List[str] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: str=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = ("""This is a simple input""", """This is a pair""")
_snake_case : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = ReformerTokenizer(a_, keep_accents=a_ )
_snake_case : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382], )
_snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = """Hello World!"""
_snake_case : Tuple = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_snake_case : List[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : Tuple = """ """.join(a_ )
_snake_case : Union[str, Any] = self.big_tokenizer.encode_plus(a_, return_tensors="""pt""" )
_snake_case : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="""pt""" )
_snake_case : List[str] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Optional[Any] = encoded_sequence["""input_ids"""].shape
_snake_case : Dict = ReformerModel(a_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Dict = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="""google/reformer-crime-and-punishment""", revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""", padding=a_, sequences=a_, )
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
    # remove and rename some keys of the loaded original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
import numpy as np
A_ = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class lowercase:
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = np.array(a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
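        # locate the letter on the 5x5 square; the returned (row, column) coordinates are 1-based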
_snake_case : Union[str, Any] = np.where(letter == self.SQUARE )
_snake_case : Dict = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def UpperCamelCase_ ( self: Union[str, Any], a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCamelCase_ ( self: Any, a_: str ):
'''simple docstring'''
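        # normalize the plaintext: lowercase, drop spaces, and fold "j" into "i" (the square has no "j")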
_snake_case : int = message.lower()
_snake_case : str = message.replace(""" """, """""" )
_snake_case : Optional[int] = message.replace("""j""", """i""" )
_snake_case : List[str] = np.empty((2, len(a_ )) )
for letter_index in range(len(a_ ) ):
_snake_case : List[Any] = self.letter_to_numbers(message[letter_index] )
_snake_case : Union[str, Any] = numbers[0]
_snake_case : Optional[int] = numbers[1]
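        # flatten so all row coordinates come first, then all column coordinates (the bifid transposition step)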
_snake_case : Tuple = first_step.reshape(2 * len(a_ ) )
_snake_case : Optional[Any] = """"""
for numbers_index in range(len(a_ ) ):
_snake_case : int = int(second_step[numbers_index * 2] )
_snake_case : Tuple = int(second_step[(numbers_index * 2) + 1] )
_snake_case : Optional[int] = self.numbers_to_letter(a_, a_ )
_snake_case : int = encoded_message + letter
return encoded_message
def UpperCamelCase_ ( self: Any, a_: str ):
'''simple docstring'''
_snake_case : List[Any] = message.lower()
message.replace(""" """, """""" )
_snake_case : Any = np.empty(2 * len(a_ ) )
for letter_index in range(len(a_ ) ):
_snake_case : List[str] = self.letter_to_numbers(message[letter_index] )
_snake_case : str = numbers[0]
_snake_case : Dict = numbers[1]
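        # reshape so the first row holds row coordinates and the second holds column coordinates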
_snake_case : int = first_step.reshape((2, len(a_ )) )
_snake_case : Optional[Any] = """"""
for numbers_index in range(len(a_ ) ):
_snake_case : Union[str, Any] = int(second_step[0, numbers_index] )
_snake_case : Tuple = int(second_step[1, numbers_index] )
_snake_case : List[str] = self.numbers_to_letter(a_, a_ )
_snake_case : List[str] = decoded_message + letter
return decoded_message
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
from math import ceil, sqrt
def UpperCAmelCase__ (snake_case__ : int = 1_00_00_00 ):
"""simple docstring"""
_snake_case : List[Any] = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_snake_case : Any = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_snake_case : Dict = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 | 0 |